author    Luca Coelho <luciano.coelho@intel.com>    2016-05-10 03:30:56 -0400
committer Luca Coelho <luciano.coelho@intel.com>    2016-05-10 03:30:56 -0400
commit    bae6692c24236d0203f88a444986d86437a858fa (patch)
tree      0f9bee1250af3046fa46049736b615b81e60f56e /drivers/net
parent    46167a8fd4248533ad15867e6988ff20e76de641 (diff)
parent    57fbcce37be7c1d2622b56587c10ade00e96afa3 (diff)
Merge tag 'mac80211-next-for-davem-2016-04-13' of git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next into master
To synchronize with Kalle, here's just a big change that affects all drivers - removing the duplicated enum ieee80211_band and replacing it by enum nl80211_band. On top of that, just a small documentation update.
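For drivers, that rename is largely mechanical: enum ieee80211_band becomes enum nl80211_band, and the IEEE80211_BAND_*/IEEE80211_NUM_BANDS constants become their NL80211_BAND_*/NUM_NL80211_BANDS counterparts. A minimal sketch of the pattern (hypothetical helper, not taken from any driver in the diffstat below; assumes only the cfg80211 API in <net/cfg80211.h>):

#include <net/cfg80211.h>

/* Illustrative only: iterate the wiphy band array using the nl80211
 * names that replace the old ieee80211_band enum.
 */
static int example_count_channels(struct wiphy *wiphy)
{
	enum nl80211_band band;		/* was: enum ieee80211_band */
	int n_channels = 0;

	for (band = 0; band < NUM_NL80211_BANDS; band++) {
		/* was: band < IEEE80211_NUM_BANDS */
		struct ieee80211_supported_band *sband = wiphy->bands[band];

		if (sband)
			n_channels += sband->n_channels;
	}

	return n_channels;
}

The same substitution applies to the explicit constants, e.g. IEEE80211_BAND_2GHZ becomes NL80211_BAND_2GHZ.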
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/dsa/bcm_sf2.c25
-rw-r--r--drivers/net/dsa/mv88e6131.c11
-rw-r--r--drivers/net/dsa/mv88e6171.c2
-rw-r--r--drivers/net/dsa/mv88e6352.c2
-rw-r--r--drivers/net/dsa/mv88e6xxx.c263
-rw-r--r--drivers/net/dsa/mv88e6xxx.h22
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c269
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h19
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c171
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h433
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c14
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c43
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h6
-rw-r--r--drivers/net/ethernet/cadence/macb.c221
-rw-r--r--drivers/net/ethernet/chelsio/Kconfig11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/Makefile1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h27
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c34
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c100
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.c464
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.h310
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c13
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h217
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c3
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c2
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.c104
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.h5
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c96
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c41
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c45
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h6
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c92
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h5
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c40
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c11
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c196
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h23
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h11
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c48
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c48
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c251
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h4
-rw-r--r--drivers/net/ethernet/intel/Kconfig80
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000.h2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c29
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c12
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k.h14
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c223
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_iov.c4
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c85
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c6
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c72
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.c20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_ptp.c2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_tlv.c24
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_type.h8
-rw-r--r--drivers/net/ethernet/intel/i40e/Makefile1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h25
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c33
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.c1012
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.h232
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c16
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_devids.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c20
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_fcoe.c14
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c163
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c43
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c71
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h45
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h37
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl.h79
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c342
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h6
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq.h1
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_common.c1
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_devids.h1
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c67
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h45
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_type.h43
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h45
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c73
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c3
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c32
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h27
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c13
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c91
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h7
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c13
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c522
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c42
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_model.h29
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c35
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h119
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c31
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c685
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h11
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c149
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c4
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c40
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c18
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c57
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c72
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Kconfig8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Makefile1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c56
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h26
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h534
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c360
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h66
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c128
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c480
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c45
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h12
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c903
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c20
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c30
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge.h2
-rw-r--r--drivers/net/ethernet/renesas/ravb.h204
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c268
-rw-r--r--drivers/net/ethernet/renesas/ravb_ptp.c17
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c4
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h64
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c35
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h255
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c407
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c396
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h129
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c354
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h202
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c225
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c21
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc_core.c349
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c37
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c675
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c98
-rw-r--r--drivers/net/geneve.c39
-rw-r--r--drivers/net/hyperv/hyperv_net.h7
-rw-r--r--drivers/net/hyperv/netvsc_drv.c5
-rw-r--r--drivers/net/hyperv/rndis_filter.c16
-rw-r--r--drivers/net/irda/Kconfig7
-rw-r--r--drivers/net/irda/Makefile1
-rw-r--r--drivers/net/irda/sh_irda.c875
-rw-r--r--drivers/net/phy/at803x.c10
-rw-r--r--drivers/net/phy/bcm7xxx.c4
-rw-r--r--drivers/net/phy/mdio-sun4i.c4
-rw-r--r--drivers/net/ppp/ppp_generic.c4
-rw-r--r--drivers/net/rionet.c277
-rw-r--r--drivers/net/team/team.c5
-rw-r--r--drivers/net/tun.c13
-rw-r--r--drivers/net/usb/cdc_ncm.c7
-rw-r--r--drivers/net/usb/lan78xx.c49
-rw-r--r--drivers/net/usb/plusb.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/virtio_net.c2
-rw-r--r--drivers/net/vxlan.c249
-rw-r--r--drivers/net/wireless/admtek/adm8211.c4
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c4
-rw-r--r--drivers/net/wireless/ath/ath.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c44
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h15
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c156
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h43
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c100
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h55
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c716
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c291
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h6
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c614
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.h6
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c106
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c37
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.h4
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-ops.h23
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c140
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h54
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h10
-rw-r--r--drivers/net/wireless/ath/ath5k/attach.c8
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c32
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c6
-rw-r--r--drivers/net/wireless/ath/ath5k/led.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c6
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c32
-rw-r--r--drivers/net/wireless/ath/ath5k/qcu.c8
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c10
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c22
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c27
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c44
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.c39
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9340_initvals.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9485_initvals.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar953x_initvals.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar956x_initvals.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c138
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/channel.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/common-init.c28
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c24
-rw-r--r--drivers/net/wireless/ath/ath9k/debug_sta.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/dynack.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c69
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_gpio.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c22
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c267
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h11
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c13
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c13
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h90
-rw-r--r--drivers/net/wireless/ath/ath9k/rng.c20
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c4
-rw-r--r--drivers/net/wireless/ath/carl9170/mac.c12
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c6
-rw-r--r--drivers/net/wireless/ath/carl9170/phy.c18
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c8
-rw-r--r--drivers/net/wireless/ath/regd.c16
-rw-r--r--drivers/net/wireless/ath/regd.h2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c12
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c4
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/Makefile1
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c336
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c59
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c6
-rw-r--r--drivers/net/wireless/ath/wil6210/ioctl.c11
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c81
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c9
-rw-r--r--drivers/net/wireless/ath/wil6210/p2p.c253
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c1
-rw-r--r--drivers/net/wireless/ath/wil6210/rx_reorder.c204
-rw-r--r--drivers/net/wireless/ath/wil6210/trace.h19
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c67
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.h12
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h110
-rw-r--r--drivers/net/wireless/ath/wil6210/wil_platform.h8
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c136
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h1264
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.c6
-rw-r--r--drivers/net/wireless/atmel/atmel.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/b43.h4
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c40
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_ac.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_common.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_ht.c16
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_lcn.c10
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_lp.c30
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_n.c176
-rw-r--r--drivers/net/wireless/broadcom/b43/tables_lpphy.c14
-rw-r--r--drivers/net/wireless/broadcom/b43/tables_nphy.c16
-rw-r--r--drivers/net/wireless/broadcom/b43/tables_phy_lcn.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/xmit.c8
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/main.c12
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/xmit.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c74
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c16
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c4
-rw-r--r--drivers/net/wireless/cisco/airo.c6
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c8
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c12
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c30
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-rs.c22
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945.c20
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c41
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-rs.c22
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965.c6
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965.h2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.c92
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.h30
-rw-r--r--drivers/net/wireless/intel/iwlegacy/debug.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/agn.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/dev.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/devices.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/lib.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rs.c22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rs.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rx.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rxon.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/scan.c38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/sta.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tx.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-1000.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-2000.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-5000.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-6000.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-7000.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-8000.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-9000.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tdls.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c4
-rw-r--r--drivers/net/wireless/intersil/orinoco/cfg.c6
-rw-r--r--drivers/net/wireless/intersil/orinoco/hw.c2
-rw-r--r--drivers/net/wireless/intersil/orinoco/scan.c4
-rw-r--r--drivers/net/wireless/intersil/p54/eeprom.c32
-rw-r--r--drivers/net/wireless/intersil/p54/main.c4
-rw-r--r--drivers/net/wireless/intersil/p54/p54.h2
-rw-r--r--drivers/net/wireless/intersil/p54/txrx.c4
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c19
-rw-r--r--drivers/net/wireless/marvell/libertas/cfg.c10
-rw-r--r--drivers/net/wireless/marvell/libertas/cmd.c4
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/main.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c63
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfp.c12
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h11
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c98
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.h18
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c7
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmd.c28
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_event.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_ioctl.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/tdls.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_cmd.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_txrx.c92
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c88
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/init.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c30
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00.h7
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00dev.c43
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00usb.c21
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt61pci.c22
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt73usb.c22
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c12
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c4
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c193
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h130
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h31
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c44
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c847
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c611
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c865
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c652
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c851
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c31
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h19
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c39
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/regd.c16
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h7
-rw-r--r--drivers/net/wireless/rndis_wlan.c4
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c100
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c8
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_pkt.c24
-rw-r--r--drivers/net/wireless/rsi/rsi_main.h2
-rw-r--r--drivers/net/wireless/st/cw1200/main.c10
-rw-r--r--drivers/net/wireless/st/cw1200/scan.c2
-rw-r--r--drivers/net/wireless/st/cw1200/sta.c6
-rw-r--r--drivers/net/wireless/st/cw1200/txrx.c2
-rw-r--r--drivers/net/wireless/st/cw1200/wsm.c4
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c2
-rw-r--r--drivers/net/wireless/ti/wl1251/ps.c2
-rw-r--r--drivers/net/wireless/ti/wl1251/rx.c2
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c8
-rw-r--r--drivers/net/wireless/ti/wl12xx/scan.c22
-rw-r--r--drivers/net/wireless/ti/wl18xx/cmd.c6
-rw-r--r--drivers/net/wireless/ti/wl18xx/event.c6
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c22
-rw-r--r--drivers/net/wireless/ti/wl18xx/scan.c8
-rw-r--r--drivers/net/wireless/ti/wl18xx/tx.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c36
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h6
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c42
-rw-r--r--drivers/net/wireless/ti/wlcore/ps.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.h2
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.c16
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.h4
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h4
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore_i.h2
-rw-r--r--drivers/net/wireless/wl3501_cs.c2
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_mac.c4
469 files changed, 20538 insertions, 9075 deletions
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 95944d5e3e22..780f22876538 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -545,12 +545,11 @@ static void bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port)
 	priv->port_sts[port].bridge_dev = NULL;
 }
 
-static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
+static void bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
 				       u8 state)
 {
 	struct bcm_sf2_priv *priv = ds_to_priv(ds);
 	u8 hw_state, cur_hw_state;
-	int ret = 0;
 	u32 reg;
 
 	reg = core_readl(priv, CORE_G_PCTL_PORT(port));
@@ -574,7 +573,7 @@ static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
 		break;
 	default:
 		pr_err("%s: invalid STP state: %d\n", __func__, state);
-		return -EINVAL;
+		return;
 	}
 
 	/* Fast-age ARL entries if we are moving a port from Learning or
@@ -584,10 +583,9 @@ static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
 	if (cur_hw_state != hw_state) {
 		if (cur_hw_state >= G_MISTP_LEARN_STATE &&
 		    hw_state <= G_MISTP_LISTEN_STATE) {
-			ret = bcm_sf2_sw_fast_age_port(ds, port);
-			if (ret) {
+			if (bcm_sf2_sw_fast_age_port(ds, port)) {
 				pr_err("%s: fast-ageing failed\n", __func__);
-				return ret;
+				return;
 			}
 		}
 	}
@@ -596,8 +594,6 @@ static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
 	reg &= ~(G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);
 	reg |= hw_state;
 	core_writel(priv, reg, CORE_G_PCTL_PORT(port));
-
-	return 0;
 }
 
 /* Address Resolution Logic routines */
@@ -728,13 +724,14 @@ static int bcm_sf2_sw_fdb_prepare(struct dsa_switch *ds, int port,
 	return 0;
 }
 
-static int bcm_sf2_sw_fdb_add(struct dsa_switch *ds, int port,
+static void bcm_sf2_sw_fdb_add(struct dsa_switch *ds, int port,
 			      const struct switchdev_obj_port_fdb *fdb,
 			      struct switchdev_trans *trans)
 {
 	struct bcm_sf2_priv *priv = ds_to_priv(ds);
 
-	return bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, true);
+	if (bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, true))
+		pr_err("%s: failed to add MAC address\n", __func__);
 }
 
 static int bcm_sf2_sw_fdb_del(struct dsa_switch *ds, int port,
@@ -1387,7 +1384,7 @@ static struct dsa_switch_driver bcm_sf2_switch_driver = {
 	.set_eee		= bcm_sf2_sw_set_eee,
 	.port_bridge_join	= bcm_sf2_sw_br_join,
 	.port_bridge_leave	= bcm_sf2_sw_br_leave,
-	.port_stp_update	= bcm_sf2_sw_br_set_stp_state,
+	.port_stp_state_set	= bcm_sf2_sw_br_set_stp_state,
 	.port_fdb_prepare	= bcm_sf2_sw_fdb_prepare,
 	.port_fdb_add		= bcm_sf2_sw_fdb_add,
 	.port_fdb_del		= bcm_sf2_sw_fdb_del,
diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c
index a92ca651c399..24070287c2bc 100644
--- a/drivers/net/dsa/mv88e6131.c
+++ b/drivers/net/dsa/mv88e6131.c
@@ -169,6 +169,17 @@ struct dsa_switch_driver mv88e6131_switch_driver = {
 	.get_ethtool_stats	= mv88e6xxx_get_ethtool_stats,
 	.get_sset_count		= mv88e6xxx_get_sset_count,
 	.adjust_link		= mv88e6xxx_adjust_link,
+	.port_bridge_join	= mv88e6xxx_port_bridge_join,
+	.port_bridge_leave	= mv88e6xxx_port_bridge_leave,
+	.port_vlan_filtering	= mv88e6xxx_port_vlan_filtering,
+	.port_vlan_prepare	= mv88e6xxx_port_vlan_prepare,
+	.port_vlan_add		= mv88e6xxx_port_vlan_add,
+	.port_vlan_del		= mv88e6xxx_port_vlan_del,
+	.port_vlan_dump		= mv88e6xxx_port_vlan_dump,
+	.port_fdb_prepare	= mv88e6xxx_port_fdb_prepare,
+	.port_fdb_add		= mv88e6xxx_port_fdb_add,
+	.port_fdb_del		= mv88e6xxx_port_fdb_del,
+	.port_fdb_dump		= mv88e6xxx_port_fdb_dump,
 };
 
 MODULE_ALIAS("platform:mv88e6085");
diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c
index c0164b98fc08..0e62f3b5bc81 100644
--- a/drivers/net/dsa/mv88e6171.c
+++ b/drivers/net/dsa/mv88e6171.c
@@ -105,7 +105,7 @@ struct dsa_switch_driver mv88e6171_switch_driver = {
 	.get_regs		= mv88e6xxx_get_regs,
 	.port_bridge_join	= mv88e6xxx_port_bridge_join,
 	.port_bridge_leave	= mv88e6xxx_port_bridge_leave,
-	.port_stp_update	= mv88e6xxx_port_stp_update,
+	.port_stp_state_set	= mv88e6xxx_port_stp_state_set,
 	.port_vlan_filtering	= mv88e6xxx_port_vlan_filtering,
 	.port_vlan_prepare	= mv88e6xxx_port_vlan_prepare,
 	.port_vlan_add		= mv88e6xxx_port_vlan_add,
diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c
index 5f528abc8af1..7f452e4a04a5 100644
--- a/drivers/net/dsa/mv88e6352.c
+++ b/drivers/net/dsa/mv88e6352.c
@@ -326,7 +326,7 @@ struct dsa_switch_driver mv88e6352_switch_driver = {
 	.get_regs		= mv88e6xxx_get_regs,
 	.port_bridge_join	= mv88e6xxx_port_bridge_join,
 	.port_bridge_leave	= mv88e6xxx_port_bridge_leave,
-	.port_stp_update	= mv88e6xxx_port_stp_update,
+	.port_stp_state_set	= mv88e6xxx_port_stp_state_set,
 	.port_vlan_filtering	= mv88e6xxx_port_vlan_filtering,
 	.port_vlan_prepare	= mv88e6xxx_port_vlan_prepare,
 	.port_vlan_add		= mv88e6xxx_port_vlan_add,
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index fa086e09d6b7..62320fca6712 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -482,6 +482,50 @@ static bool mv88e6xxx_6352_family(struct dsa_switch *ds)
 	return false;
 }
 
+static unsigned int mv88e6xxx_num_databases(struct dsa_switch *ds)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+	/* The following devices have 4-bit identifiers for 16 databases */
+	if (ps->id == PORT_SWITCH_ID_6061)
+		return 16;
+
+	/* The following devices have 6-bit identifiers for 64 databases */
+	if (ps->id == PORT_SWITCH_ID_6065)
+		return 64;
+
+	/* The following devices have 8-bit identifiers for 256 databases */
+	if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
+		return 256;
+
+	/* The following devices have 12-bit identifiers for 4096 databases */
+	if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
+	    mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds))
+		return 4096;
+
+	return 0;
+}
+
+static bool mv88e6xxx_has_fid_reg(struct dsa_switch *ds)
+{
+	/* Does the device have dedicated FID registers for ATU and VTU ops? */
+	if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
+	    mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds))
+		return true;
+
+	return false;
+}
+
+static bool mv88e6xxx_has_stu(struct dsa_switch *ds)
+{
+	/* Does the device have STU and dedicated SID registers for VTU ops? */
+	if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
+	    mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds))
+		return true;
+
+	return false;
+}
+
 /* We expect the switch to perform auto negotiation if there is a real
  * phy. However, in the case of a fixed link phy, we force the port
  * settings from the fixed link settings.
@@ -951,10 +995,30 @@ out:
 	return ret;
 }
 
-static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, u16 cmd)
+static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, u16 fid, u16 cmd)
 {
 	int ret;
 
+	if (mv88e6xxx_has_fid_reg(ds)) {
+		ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, fid);
+		if (ret < 0)
+			return ret;
+	} else if (mv88e6xxx_num_databases(ds) == 256) {
+		/* ATU DBNum[7:4] are located in ATU Control 15:12 */
+		ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_CONTROL);
+		if (ret < 0)
+			return ret;
+
+		ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_CONTROL,
+					   (ret & 0xfff) |
+					   ((fid << 8) & 0xf000));
+		if (ret < 0)
+			return ret;
+
+		/* ATU DBNum[3:0] are located in ATU Operation 3:0 */
+		cmd |= fid & 0xf;
+	}
+
 	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_OP, cmd);
 	if (ret < 0)
 		return ret;
@@ -1001,11 +1065,6 @@ static int _mv88e6xxx_atu_flush_move(struct dsa_switch *ds,
 		return err;
 
 	if (entry->fid) {
-		err = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID,
-					   entry->fid);
-		if (err)
-			return err;
-
 		op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB :
 			GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB;
 	} else {
@@ -1013,7 +1072,7 @@ static int _mv88e6xxx_atu_flush_move(struct dsa_switch *ds,
 			GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC;
 	}
 
-	return _mv88e6xxx_atu_cmd(ds, op);
+	return _mv88e6xxx_atu_cmd(ds, entry->fid, op);
 }
 
 static int _mv88e6xxx_atu_flush(struct dsa_switch *ds, u16 fid, bool static_too)
@@ -1134,7 +1193,7 @@ static int _mv88e6xxx_port_based_vlan_map(struct dsa_switch *ds, int port)
 	return _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN, reg);
 }
 
-int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state)
+void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	int stp_state;
@@ -1156,14 +1215,12 @@ int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state)
 		break;
 	}
 
-	/* mv88e6xxx_port_stp_update may be called with softirqs disabled,
+	/* mv88e6xxx_port_stp_state_set may be called with softirqs disabled,
 	 * so we can not update the port state directly but need to schedule it.
 	 */
 	ps->ports[port].state = stp_state;
 	set_bit(port, ps->port_state_update_mask);
 	schedule_work(&ps->bridge_work);
-
-	return 0;
 }
 
 static int _mv88e6xxx_port_pvid(struct dsa_switch *ds, int port, u16 *new,
@@ -1321,15 +1378,27 @@ static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds,
 	if (ret < 0)
 		return ret;
 
-	if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
-	    mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
+	if (mv88e6xxx_has_fid_reg(ds)) {
 		ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
 					  GLOBAL_VTU_FID);
 		if (ret < 0)
 			return ret;
 
 		next.fid = ret & GLOBAL_VTU_FID_MASK;
+	} else if (mv88e6xxx_num_databases(ds) == 256) {
+		/* VTU DBNum[7:4] are located in VTU Operation 11:8, and
+		 * VTU DBNum[3:0] are located in VTU Operation 3:0
+		 */
+		ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
+					  GLOBAL_VTU_OP);
+		if (ret < 0)
+			return ret;
+
+		next.fid = (ret & 0xf00) >> 4;
+		next.fid |= ret & 0xf;
+	}
 
+	if (mv88e6xxx_has_stu(ds)) {
 		ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
 					  GLOBAL_VTU_SID);
 		if (ret < 0)
@@ -1397,6 +1466,7 @@ unlock:
 static int _mv88e6xxx_vtu_loadpurge(struct dsa_switch *ds,
 				    struct mv88e6xxx_vtu_stu_entry *entry)
 {
+	u16 op = GLOBAL_VTU_OP_VTU_LOAD_PURGE;
 	u16 reg = 0;
 	int ret;
 
@@ -1412,17 +1482,24 @@ static int _mv88e6xxx_vtu_loadpurge(struct dsa_switch *ds,
 	if (ret < 0)
 		return ret;
 
-	if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
-	    mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
+	if (mv88e6xxx_has_stu(ds)) {
 		reg = entry->sid & GLOBAL_VTU_SID_MASK;
 		ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg);
 		if (ret < 0)
 			return ret;
+	}
 
+	if (mv88e6xxx_has_fid_reg(ds)) {
 		reg = entry->fid & GLOBAL_VTU_FID_MASK;
 		ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_FID, reg);
 		if (ret < 0)
 			return ret;
+	} else if (mv88e6xxx_num_databases(ds) == 256) {
+		/* VTU DBNum[7:4] are located in VTU Operation 11:8, and
+		 * VTU DBNum[3:0] are located in VTU Operation 3:0
+		 */
+		op |= (entry->fid & 0xf0) << 8;
+		op |= entry->fid & 0xf;
 	}
 
 	reg = GLOBAL_VTU_VID_VALID;
@@ -1432,7 +1509,7 @@ loadpurge:
 	if (ret < 0)
 		return ret;
 
-	return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_LOAD_PURGE);
+	return _mv88e6xxx_vtu_cmd(ds, op);
 }
 
 static int _mv88e6xxx_stu_getnext(struct dsa_switch *ds, u8 sid,
@@ -1511,9 +1588,17 @@ loadpurge:
 static int _mv88e6xxx_port_fid(struct dsa_switch *ds, int port, u16 *new,
 			       u16 *old)
 {
+	u16 upper_mask;
 	u16 fid;
 	int ret;
 
+	if (mv88e6xxx_num_databases(ds) == 4096)
+		upper_mask = 0xff;
+	else if (mv88e6xxx_num_databases(ds) == 256)
+		upper_mask = 0xf;
+	else
+		return -EOPNOTSUPP;
+
 	/* Port's default FID bits 3:0 are located in reg 0x06, offset 12 */
 	ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_BASE_VLAN);
 	if (ret < 0)
@@ -1536,11 +1621,11 @@ static int _mv88e6xxx_port_fid(struct dsa_switch *ds, int port, u16 *new,
 	if (ret < 0)
 		return ret;
 
-	fid |= (ret & PORT_CONTROL_1_FID_11_4_MASK) << 4;
+	fid |= (ret & upper_mask) << 4;
 
 	if (new) {
-		ret &= ~PORT_CONTROL_1_FID_11_4_MASK;
-		ret |= (*new >> 4) & PORT_CONTROL_1_FID_11_4_MASK;
+		ret &= ~upper_mask;
+		ret |= (*new >> 4) & upper_mask;
 
 		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_1,
 					   ret);
@@ -1604,7 +1689,7 @@ static int _mv88e6xxx_fid_new(struct dsa_switch *ds, u16 *fid)
 	 * databases are not needed. Return the next positive available.
 	 */
 	*fid = find_next_zero_bit(fid_bitmap, MV88E6XXX_N_FID, 1);
-	if (unlikely(*fid == MV88E6XXX_N_FID))
+	if (unlikely(*fid >= mv88e6xxx_num_databases(ds)))
 		return -ENOSPC;
 
 	/* Clear the database */
@@ -1823,31 +1908,27 @@ static int _mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u16 vid,
 	return _mv88e6xxx_vtu_loadpurge(ds, &vlan);
 }
 
-int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
+void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
 			    const struct switchdev_obj_port_vlan *vlan,
 			    struct switchdev_trans *trans)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
 	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
 	u16 vid;
-	int err = 0;
 
 	mutex_lock(&ps->smi_mutex);
 
-	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
-		err = _mv88e6xxx_port_vlan_add(ds, port, vid, untagged);
-		if (err)
-			goto unlock;
-	}
+	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
+		if (_mv88e6xxx_port_vlan_add(ds, port, vid, untagged))
+			netdev_err(ds->ports[port], "failed to add VLAN %d%c\n",
+				   vid, untagged ? 'u' : 't');
 
-	/* no PVID with ranges, otherwise it's a bug */
-	if (pvid)
-		err = _mv88e6xxx_port_pvid_set(ds, port, vlan->vid_end);
-unlock:
-	mutex_unlock(&ps->smi_mutex);
+	if (pvid && _mv88e6xxx_port_pvid_set(ds, port, vlan->vid_end))
+		netdev_err(ds->ports[port], "failed to set PVID %d\n",
+			   vlan->vid_end);
 
-	return err;
+	mutex_unlock(&ps->smi_mutex);
 }
 
 static int _mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid)
@@ -1965,11 +2046,7 @@ static int _mv88e6xxx_atu_load(struct dsa_switch *ds,
 	if (ret < 0)
 		return ret;
 
-	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, entry->fid);
-	if (ret < 0)
-		return ret;
-
-	return _mv88e6xxx_atu_cmd(ds, GLOBAL_ATU_OP_LOAD_DB);
+	return _mv88e6xxx_atu_cmd(ds, entry->fid, GLOBAL_ATU_OP_LOAD_DB);
 }
 
 static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port,
@@ -2009,21 +2086,19 @@ int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
 	return 0;
 }
 
-int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
+void mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
 			   const struct switchdev_obj_port_fdb *fdb,
 			   struct switchdev_trans *trans)
 {
 	int state = is_multicast_ether_addr(fdb->addr) ?
 		GLOBAL_ATU_DATA_STATE_MC_STATIC :
 		GLOBAL_ATU_DATA_STATE_UC_STATIC;
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	int ret;
 
 	mutex_lock(&ps->smi_mutex);
-	ret = _mv88e6xxx_port_fdb_load(ds, port, fdb->addr, fdb->vid, state);
+	if (_mv88e6xxx_port_fdb_load(ds, port, fdb->addr, fdb->vid, state))
+		netdev_err(ds->ports[port], "failed to load MAC address\n");
 	mutex_unlock(&ps->smi_mutex);
-
-	return ret;
 }
 
 int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
@@ -2052,11 +2127,7 @@ static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid,
 	if (ret < 0)
 		return ret;
 
-	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, fid);
-	if (ret < 0)
-		return ret;
-
-	ret = _mv88e6xxx_atu_cmd(ds, GLOBAL_ATU_OP_GET_NEXT_DB);
+	ret = _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
 	if (ret < 0)
 		return ret;
 
@@ -2264,6 +2335,57 @@ static void mv88e6xxx_bridge_work(struct work_struct *work)
 	mutex_unlock(&ps->smi_mutex);
 }
 
+static int _mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
+				     int reg, int val)
+{
+	int ret;
+
+	ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
+	if (ret < 0)
+		goto restore_page_0;
+
+	ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val);
+restore_page_0:
+	_mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
+
+	return ret;
+}
+
+static int _mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page,
+				    int reg)
+{
+	int ret;
+
+	ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
+	if (ret < 0)
+		goto restore_page_0;
+
+	ret = _mv88e6xxx_phy_read_indirect(ds, port, reg);
+restore_page_0:
+	_mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
+
+	return ret;
+}
+
+static int mv88e6xxx_power_on_serdes(struct dsa_switch *ds)
+{
+	int ret;
+
+	ret = _mv88e6xxx_phy_page_read(ds, REG_FIBER_SERDES, PAGE_FIBER_SERDES,
+				       MII_BMCR);
+	if (ret < 0)
+		return ret;
+
+	if (ret & BMCR_PDOWN) {
+		ret &= ~BMCR_PDOWN;
+		ret = _mv88e6xxx_phy_page_write(ds, REG_FIBER_SERDES,
+						PAGE_FIBER_SERDES, MII_BMCR,
+						ret);
+	}
+
+	return ret;
+}
+
 static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
@@ -2367,6 +2489,23 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
 			goto abort;
 	}
 
+	/* If this port is connected to a SerDes, make sure the SerDes is not
+	 * powered down.
+	 */
+	if (mv88e6xxx_6352_family(ds)) {
+		ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
+		if (ret < 0)
+			goto abort;
+		ret &= PORT_STATUS_CMODE_MASK;
+		if ((ret == PORT_STATUS_CMODE_100BASE_X) ||
+		    (ret == PORT_STATUS_CMODE_1000BASE_X) ||
+		    (ret == PORT_STATUS_CMODE_SGMII)) {
+			ret = mv88e6xxx_power_on_serdes(ds);
+			if (ret < 0)
+				goto abort;
+		}
+	}
+
 	/* Port Control 2: don't force a good FCS, set the maximum frame size to
 	 * 10240 bytes, disable 802.1q tags checking, don't discard tagged or
 	 * untagged frames on this port, do a destination address lookup on all
@@ -2376,7 +2515,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
 	reg = 0;
 	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
 	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
-	    mv88e6xxx_6095_family(ds) || mv88e6xxx_6320_family(ds))
+	    mv88e6xxx_6095_family(ds) || mv88e6xxx_6320_family(ds) ||
+	    mv88e6xxx_6185_family(ds))
 		reg = PORT_CONTROL_2_MAP_DA;
 
 	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
@@ -2714,13 +2854,9 @@ int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
 	int ret;
 
 	mutex_lock(&ps->smi_mutex);
-	ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
-	if (ret < 0)
-		goto error;
-	ret = _mv88e6xxx_phy_read_indirect(ds, port, reg);
-error:
-	_mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
+	ret = _mv88e6xxx_phy_page_read(ds, port, page, reg);
 	mutex_unlock(&ps->smi_mutex);
+
 	return ret;
 }
 
@@ -2731,14 +2867,9 @@ int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
 	int ret;
 
 	mutex_lock(&ps->smi_mutex);
-	ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
-	if (ret < 0)
-		goto error;
-
-	ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val);
-error:
-	_mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
+	ret = _mv88e6xxx_phy_page_write(ds, port, page, reg, val);
 	mutex_unlock(&ps->smi_mutex);
+
 	return ret;
 }
 
diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h
index 9a038aba48fb..236bcaa606e7 100644
--- a/drivers/net/dsa/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx.h
@@ -28,6 +28,10 @@
 #define SMI_CMD_OP_45_READ_DATA_INC	((3 << 10) | SMI_CMD_BUSY)
 #define SMI_DATA			0x01
 
+/* Fiber/SERDES Registers are located at SMI address F, page 1 */
+#define REG_FIBER_SERDES	0x0f
+#define PAGE_FIBER_SERDES	0x01
+
 #define REG_PORT(p)		(0x10 + (p))
 #define PORT_STATUS		0x00
 #define PORT_STATUS_PAUSE_EN	BIT(15)
@@ -45,6 +49,10 @@
 #define PORT_STATUS_MGMII	BIT(6) /* 6185 */
 #define PORT_STATUS_TX_PAUSED	BIT(5)
 #define PORT_STATUS_FLOW_CTRL	BIT(4)
+#define PORT_STATUS_CMODE_MASK	0x0f
+#define PORT_STATUS_CMODE_100BASE_X	0x8
+#define PORT_STATUS_CMODE_1000BASE_X	0x9
+#define PORT_STATUS_CMODE_SGMII	0xa
 #define PORT_PCS_CTRL		0x01
 #define PORT_PCS_CTRL_RGMII_DELAY_RXCLK	BIT(15)
 #define PORT_PCS_CTRL_RGMII_DELAY_TXCLK	BIT(14)
@@ -489,15 +497,15 @@ int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
 int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
 			       struct net_device *bridge);
 void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port);
-int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state);
+void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port, u8 state);
 int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
 				  bool vlan_filtering);
 int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
 				const struct switchdev_obj_port_vlan *vlan,
 				struct switchdev_trans *trans);
-int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
+void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
 			    const struct switchdev_obj_port_vlan *vlan,
 			    struct switchdev_trans *trans);
 int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
 			    const struct switchdev_obj_port_vlan *vlan);
 int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port,
@@ -506,9 +514,9 @@ int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port,
 int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
 			       const struct switchdev_obj_port_fdb *fdb,
 			       struct switchdev_trans *trans);
-int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
+void mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
 			   const struct switchdev_obj_port_fdb *fdb,
 			   struct switchdev_trans *trans);
 int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
 			   const struct switchdev_obj_port_fdb *fdb);
 int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index aabbd51db981..597e4724a474 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1,6 +1,6 @@
1/* Broadcom NetXtreme-C/E network driver. 1/* Broadcom NetXtreme-C/E network driver.
2 * 2 *
3 * Copyright (c) 2014-2015 Broadcom Corporation 3 * Copyright (c) 2014-2016 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -118,6 +118,12 @@ static const u16 bnxt_vf_req_snif[] = {
118 HWRM_CFA_L2_FILTER_ALLOC, 118 HWRM_CFA_L2_FILTER_ALLOC,
119}; 119};
120 120
121static const u16 bnxt_async_events_arr[] = {
122 HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
123 HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
124 HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
125};
126
121static bool bnxt_vf_pciid(enum board_idx idx) 127static bool bnxt_vf_pciid(enum board_idx idx)
122{ 128{
123 return (idx == BCM57304_VF || idx == BCM57404_VF); 129 return (idx == BCM57304_VF || idx == BCM57404_VF);
@@ -1231,6 +1237,19 @@ next_rx_no_prod:
1231 return rc; 1237 return rc;
1232} 1238}
1233 1239
1240#define BNXT_GET_EVENT_PORT(data) \
1241 ((data) & \
1242 HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
1243
1244#define BNXT_EVENT_POLICY_MASK \
1245 HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK
1246
1247#define BNXT_EVENT_POLICY_SFT \
1248 HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT
1249
1250#define BNXT_GET_EVENT_POLICY(data) \
1251 (((data) & BNXT_EVENT_POLICY_MASK) >> BNXT_EVENT_POLICY_SFT)
1252
1234static int bnxt_async_event_process(struct bnxt *bp, 1253static int bnxt_async_event_process(struct bnxt *bp,
1235 struct hwrm_async_event_cmpl *cmpl) 1254 struct hwrm_async_event_cmpl *cmpl)
1236{ 1255{
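The BNXT_GET_EVENT_PORT()/BNXT_GET_EVENT_POLICY() macros pull the port id and the enforcement policy out of event_data1 using the mask/shift constants defined in the bnxt_hsi.h hunk further down in this patch (port id in bits 0-15, policy in bits 16-23). The same decode as a standalone sketch:

#include <stdio.h>

/* Field layout of event_data1 for PORT_CONN_NOT_ALLOWED, per the
 * *_EVENT_DATA1_* constants in the bnxt_hsi.h hunk of this patch. */
#define PORT_ID_MASK 0x0000ffffu
#define POLICY_MASK  0x00ff0000u
#define POLICY_SFT   16

static unsigned int event_port(unsigned int data1)
{
	return data1 & PORT_ID_MASK;
}

static unsigned int event_policy(unsigned int data1)
{
	return (data1 & POLICY_MASK) >> POLICY_SFT;
}

int main(void)
{
	unsigned int data1 = (1u << POLICY_SFT) | 0x0002;  /* policy 1, port 2 */

	printf("port %u, policy %u\n", event_port(data1), event_policy(data1));
	return 0;
}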
@@ -1244,6 +1263,22 @@ static int bnxt_async_event_process(struct bnxt *bp,
1244 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD: 1263 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
1245 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event); 1264 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
1246 break; 1265 break;
1266 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
1267 u32 data1 = le32_to_cpu(cmpl->event_data1);
1268 u16 port_id = BNXT_GET_EVENT_PORT(data1);
1269
1270 if (BNXT_VF(bp))
1271 break;
1272
1273 if (bp->pf.port_id != port_id)
1274 break;
1275
1276 bp->link_info.last_port_module_event =
1277 BNXT_GET_EVENT_POLICY(data1);
1278
1279 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
1280 break;
1281 }
1247 default: 1282 default:
1248 netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n", 1283 netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n",
1249 event_id); 1284 event_id);
@@ -2653,7 +2688,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
2653 /* Write request msg to hwrm channel */ 2688 /* Write request msg to hwrm channel */
2654 __iowrite32_copy(bp->bar0, data, msg_len / 4); 2689 __iowrite32_copy(bp->bar0, data, msg_len / 4);
2655 2690
2656 for (i = msg_len; i < HWRM_MAX_REQ_LEN; i += 4) 2691 for (i = msg_len; i < BNXT_HWRM_MAX_REQ_LEN; i += 4)
2657 writel(0, bp->bar0 + i); 2692 writel(0, bp->bar0 + i);
2658 2693
2659 /* currently supports only one outstanding message */ 2694 /* currently supports only one outstanding message */
@@ -2751,6 +2786,8 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
2751{ 2786{
2752 struct hwrm_func_drv_rgtr_input req = {0}; 2787 struct hwrm_func_drv_rgtr_input req = {0};
2753 int i; 2788 int i;
2789 DECLARE_BITMAP(async_events_bmap, 256);
2790 u32 *events = (u32 *)async_events_bmap;
2754 2791
2755 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1); 2792 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
2756 2793
@@ -2759,11 +2796,14 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
2759 FUNC_DRV_RGTR_REQ_ENABLES_VER | 2796 FUNC_DRV_RGTR_REQ_ENABLES_VER |
2760 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); 2797 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
2761 2798
2762 /* TODO: current async event fwd bits are not defined and the firmware 2799 memset(async_events_bmap, 0, sizeof(async_events_bmap));
2763 * only checks if it is non-zero to enable async event forwarding 2800 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
2764 */ 2801 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
2765 req.async_event_fwd[0] |= cpu_to_le32(1); 2802
2766 req.os_type = cpu_to_le16(1); 2803 for (i = 0; i < 8; i++)
2804 req.async_event_fwd[i] |= cpu_to_le32(events[i]);
2805
2806 req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
2767 req.ver_maj = DRV_VER_MAJ; 2807 req.ver_maj = DRV_VER_MAJ;
2768 req.ver_min = DRV_VER_MIN; 2808 req.ver_min = DRV_VER_MIN;
2769 req.ver_upd = DRV_VER_UPD; 2809 req.ver_upd = DRV_VER_UPD;
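Instead of the old "any non-zero value" hack, driver registration now builds a 256-bit bitmap of the async event ids it wants forwarded and copies it into the eight 32-bit async_event_fwd words of the request. A simplified little-endian userspace equivalent is below; it uses only the two event ids whose values are visible in the bnxt_hsi.h hunk later in this patch (the real array also carries the link status change event):

#include <stdio.h>
#include <string.h>

/* Event ids whose values appear in the bnxt_hsi.h hunk below. */
#define EVENT_PORT_CONN_NOT_ALLOWED 0x4
#define EVENT_PF_DRVR_UNLOAD        0x20

int main(void)
{
	static const unsigned int events[] = {
		EVENT_PORT_CONN_NOT_ALLOWED,
		EVENT_PF_DRVR_UNLOAD,
	};
	unsigned int fwd[8];                 /* 8 x 32 bits = 256-bit bitmap */
	size_t i;

	memset(fwd, 0, sizeof(fwd));
	for (i = 0; i < sizeof(events) / sizeof(events[0]); i++)
		fwd[events[i] / 32] |= 1u << (events[i] % 32);  /* __set_bit() */

	for (i = 0; i < 8; i++)
		printf("async_event_fwd[%zu] = 0x%08x\n", i, fwd[i]);
	return 0;
}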
@@ -3391,11 +3431,11 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
3391 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3431 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3392 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 3432 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3393 3433
3434 cpr->cp_doorbell = bp->bar1 + i * 0x80;
3394 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i, 3435 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i,
3395 INVALID_STATS_CTX_ID); 3436 INVALID_STATS_CTX_ID);
3396 if (rc) 3437 if (rc)
3397 goto err_out; 3438 goto err_out;
3398 cpr->cp_doorbell = bp->bar1 + i * 0x80;
3399 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); 3439 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
3400 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; 3440 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
3401 } 3441 }
@@ -3726,7 +3766,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
3726 3766
3727 pf->fw_fid = le16_to_cpu(resp->fid); 3767 pf->fw_fid = le16_to_cpu(resp->fid);
3728 pf->port_id = le16_to_cpu(resp->port_id); 3768 pf->port_id = le16_to_cpu(resp->port_id);
3729 memcpy(pf->mac_addr, resp->perm_mac_address, ETH_ALEN); 3769 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
3730 memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN); 3770 memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
3731 pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 3771 pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
3732 pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 3772 pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
@@ -3751,7 +3791,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
3751 struct bnxt_vf_info *vf = &bp->vf; 3791 struct bnxt_vf_info *vf = &bp->vf;
3752 3792
3753 vf->fw_fid = le16_to_cpu(resp->fid); 3793 vf->fw_fid = le16_to_cpu(resp->fid);
3754 memcpy(vf->mac_addr, resp->perm_mac_address, ETH_ALEN); 3794 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
3755 if (is_valid_ether_addr(vf->mac_addr)) 3795 if (is_valid_ether_addr(vf->mac_addr))
3756 /* overwrite netdev dev_adr with admin VF MAC */ 3796 /* overwrite netdev dev_adr with admin VF MAC */
3757 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN); 3797 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
@@ -3830,6 +3870,7 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
3830 struct hwrm_ver_get_input req = {0}; 3870 struct hwrm_ver_get_input req = {0};
3831 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr; 3871 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
3832 3872
3873 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
3833 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1); 3874 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
3834 req.hwrm_intf_maj = HWRM_VERSION_MAJOR; 3875 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
3835 req.hwrm_intf_min = HWRM_VERSION_MINOR; 3876 req.hwrm_intf_min = HWRM_VERSION_MINOR;
@@ -3841,6 +3882,8 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
3841 3882
3842 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output)); 3883 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
3843 3884
3885 bp->hwrm_spec_code = resp->hwrm_intf_maj << 16 |
3886 resp->hwrm_intf_min << 8 | resp->hwrm_intf_upd;
3844 if (resp->hwrm_intf_maj < 1) { 3887 if (resp->hwrm_intf_maj < 1) {
3845 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n", 3888 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
3846 resp->hwrm_intf_maj, resp->hwrm_intf_min, 3889 resp->hwrm_intf_maj, resp->hwrm_intf_min,
@@ -3855,6 +3898,9 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
3855 if (!bp->hwrm_cmd_timeout) 3898 if (!bp->hwrm_cmd_timeout)
3856 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; 3899 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
3857 3900
3901 if (resp->hwrm_intf_maj >= 1)
3902 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
3903
3858hwrm_ver_get_exit: 3904hwrm_ver_get_exit:
3859 mutex_unlock(&bp->hwrm_cmd_lock); 3905 mutex_unlock(&bp->hwrm_cmd_lock);
3860 return rc; 3906 return rc;
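bnxt_hwrm_ver_get() now caches the firmware interface version as a single integer, major<<16 | minor<<8 | update, so later code can gate features with plain comparisons such as hwrm_spec_code >= 0x10201 (i.e. spec 1.2.1). For example:

#include <stdio.h>

/* Pack major.minor.update into the single value kept in hwrm_spec_code. */
static unsigned int spec_code(unsigned int maj, unsigned int min, unsigned int upd)
{
	return maj << 16 | min << 8 | upd;
}

int main(void)
{
	/* 1.2.2 packs to 0x10202, so the ">= 0x10201" (1.2.1) gates used
	 * throughout this patch are satisfied by this firmware. */
	printf("0x%05x >= 0x10201: %d\n", spec_code(1, 2, 2),
	       spec_code(1, 2, 2) >= 0x10201);
	return 0;
}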
@@ -4482,12 +4528,49 @@ static void bnxt_report_link(struct bnxt *bp)
4482 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); 4528 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
4483 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n", 4529 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
4484 speed, duplex, flow_ctrl); 4530 speed, duplex, flow_ctrl);
4531 if (bp->flags & BNXT_FLAG_EEE_CAP)
4532 netdev_info(bp->dev, "EEE is %s\n",
4533 bp->eee.eee_active ? "active" :
4534 "not active");
4485 } else { 4535 } else {
4486 netif_carrier_off(bp->dev); 4536 netif_carrier_off(bp->dev);
4487 netdev_err(bp->dev, "NIC Link is Down\n"); 4537 netdev_err(bp->dev, "NIC Link is Down\n");
4488 } 4538 }
4489} 4539}
4490 4540
4541static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
4542{
4543 int rc = 0;
4544 struct hwrm_port_phy_qcaps_input req = {0};
4545 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4546
4547 if (bp->hwrm_spec_code < 0x10201)
4548 return 0;
4549
4550 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
4551
4552 mutex_lock(&bp->hwrm_cmd_lock);
4553 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4554 if (rc)
4555 goto hwrm_phy_qcaps_exit;
4556
4557 if (resp->eee_supported & PORT_PHY_QCAPS_RESP_EEE_SUPPORTED) {
4558 struct ethtool_eee *eee = &bp->eee;
4559 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
4560
4561 bp->flags |= BNXT_FLAG_EEE_CAP;
4562 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
4563 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
4564 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
4565 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
4566 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
4567 }
4568
4569hwrm_phy_qcaps_exit:
4570 mutex_unlock(&bp->hwrm_cmd_lock);
4571 return rc;
4572}
4573
4491static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) 4574static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
4492{ 4575{
4493 int rc = 0; 4576 int rc = 0;
@@ -4519,7 +4602,6 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
4519 else 4602 else
4520 link_info->link_speed = 0; 4603 link_info->link_speed = 0;
4521 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed); 4604 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
4522 link_info->auto_link_speed = le16_to_cpu(resp->auto_link_speed);
4523 link_info->support_speeds = le16_to_cpu(resp->support_speeds); 4605 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
4524 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask); 4606 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
4525 link_info->lp_auto_link_speeds = 4607 link_info->lp_auto_link_speeds =
@@ -4529,9 +4611,45 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
4529 link_info->phy_ver[1] = resp->phy_min; 4611 link_info->phy_ver[1] = resp->phy_min;
4530 link_info->phy_ver[2] = resp->phy_bld; 4612 link_info->phy_ver[2] = resp->phy_bld;
4531 link_info->media_type = resp->media_type; 4613 link_info->media_type = resp->media_type;
4532 link_info->transceiver = resp->transceiver_type; 4614 link_info->transceiver = resp->xcvr_pkg_type;
4533 link_info->phy_addr = resp->phy_addr; 4615 link_info->phy_addr = resp->eee_config_phy_addr &
4616 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
4617
4618 if (bp->flags & BNXT_FLAG_EEE_CAP) {
4619 struct ethtool_eee *eee = &bp->eee;
4620 u16 fw_speeds;
4621
4622 eee->eee_active = 0;
4623 if (resp->eee_config_phy_addr &
4624 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
4625 eee->eee_active = 1;
4626 fw_speeds = le16_to_cpu(
4627 resp->link_partner_adv_eee_link_speed_mask);
4628 eee->lp_advertised =
4629 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
4630 }
4631
4632 /* Pull initial EEE config */
4633 if (!chng_link_state) {
4634 if (resp->eee_config_phy_addr &
4635 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
4636 eee->eee_enabled = 1;
4637
4638 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
4639 eee->advertised =
4640 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
4641
4642 if (resp->eee_config_phy_addr &
4643 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
4644 __le32 tmr;
4534 4645
4646 eee->tx_lpi_enabled = 1;
4647 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
4648 eee->tx_lpi_timer = le32_to_cpu(tmr) &
4649 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
4650 }
4651 }
4652 }
4535 /* TODO: need to add more logic to report VF link */ 4653 /* TODO: need to add more logic to report VF link */
4536 if (chng_link_state) { 4654 if (chng_link_state) {
4537 if (link_info->phy_link_status == BNXT_LINK_LINK) 4655 if (link_info->phy_link_status == BNXT_LINK_LINK)
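With the 1.2.x spec the PHY address and the EEE configuration bits share the eee_config_phy_addr byte, so the driver masks out the address and tests the EEE flags separately before pulling the advertised and link-partner EEE speed masks. A toy decode of such a packed byte follows; the bit positions and the 0x1f address mask are assumptions made for illustration, not the HWRM values:

#include <stdio.h>

/* Bit positions and the 0x1f address mask below are assumptions made
 * for illustration only; the real values live in bnxt_hsi.h. */
#define PHY_ADDR_MASK   0x1f
#define EEE_ENABLED_BIT (1u << 5)
#define EEE_ACTIVE_BIT  (1u << 6)
#define EEE_TX_LPI_BIT  (1u << 7)

int main(void)
{
	unsigned int v = 0x03 | EEE_ENABLED_BIT | EEE_ACTIVE_BIT;

	printf("phy addr %u, enabled %d, active %d, tx_lpi %d\n",
	       v & PHY_ADDR_MASK, !!(v & EEE_ENABLED_BIT),
	       !!(v & EEE_ACTIVE_BIT), !!(v & EEE_TX_LPI_BIT));
	return 0;
}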
@@ -4552,10 +4670,13 @@ static void
4552bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) 4670bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
4553{ 4671{
4554 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) { 4672 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
4673 if (bp->hwrm_spec_code >= 0x10201)
4674 req->auto_pause =
4675 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
4555 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) 4676 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
4556 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX; 4677 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
4557 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 4678 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
4558 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX; 4679 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
4559 req->enables |= 4680 req->enables |=
4560 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 4681 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
4561 } else { 4682 } else {
@@ -4565,6 +4686,11 @@ bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
4565 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX; 4686 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
4566 req->enables |= 4687 req->enables |=
4567 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE); 4688 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
4689 if (bp->hwrm_spec_code >= 0x10201) {
4690 req->auto_pause = req->force_pause;
4691 req->enables |= cpu_to_le32(
4692 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
4693 }
4568 } 4694 }
4569} 4695}
4570 4696
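Pause programming now depends on the firmware spec: with flow-control autoneg the requested RX/TX bits go into auto_pause (plus an explicit autoneg-pause flag on 1.2.1+), while in the forced case the same bits are mirrored from force_pause into auto_pause on new firmware. A compact model of that branching, with placeholder bit values:

#include <stdio.h>

/* Placeholder bit values; only the branching mirrors the driver logic. */
#define PAUSE_RX      0x1u
#define PAUSE_TX      0x2u
#define AUTONEG_PAUSE 0x4u   /* stand-in for the 1.2.1+ autoneg-pause flag */

struct pause_req { unsigned int auto_pause, force_pause; };

static struct pause_req build_pause(int autoneg, unsigned int req,
				    unsigned int spec_code)
{
	struct pause_req r = { 0, 0 };

	if (autoneg) {
		if (spec_code >= 0x10201)
			r.auto_pause = AUTONEG_PAUSE;
		r.auto_pause |= req & (PAUSE_RX | PAUSE_TX);
	} else {
		r.force_pause = req & (PAUSE_RX | PAUSE_TX);
		if (spec_code >= 0x10201)
			r.auto_pause = r.force_pause;   /* mirrored on 1.2.1+ */
	}
	return r;
}

int main(void)
{
	struct pause_req r = build_pause(1, PAUSE_RX | PAUSE_TX, 0x10202);

	printf("auto=0x%x force=0x%x\n", r.auto_pause, r.force_pause);
	return 0;
}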
@@ -4577,7 +4703,7 @@ static void bnxt_hwrm_set_link_common(struct bnxt *bp,
4577 4703
4578 if (autoneg & BNXT_AUTONEG_SPEED) { 4704 if (autoneg & BNXT_AUTONEG_SPEED) {
4579 req->auto_mode |= 4705 req->auto_mode |=
4580 PORT_PHY_CFG_REQ_AUTO_MODE_MASK; 4706 PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
4581 4707
4582 req->enables |= cpu_to_le32( 4708 req->enables |= cpu_to_le32(
4583 PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK); 4709 PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
@@ -4591,9 +4717,6 @@ static void bnxt_hwrm_set_link_common(struct bnxt *bp,
4591 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE); 4717 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
4592 } 4718 }
4593 4719
4594 /* currently don't support half duplex */
4595 req->auto_duplex = PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL;
4596 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX);
4597 /* tell chimp that the setting takes effect immediately */ 4720 /* tell chimp that the setting takes effect immediately */
4598 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); 4721 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
4599} 4722}
@@ -4628,7 +4751,30 @@ int bnxt_hwrm_set_pause(struct bnxt *bp)
4628 return rc; 4751 return rc;
4629} 4752}
4630 4753
4631int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause) 4754static void bnxt_hwrm_set_eee(struct bnxt *bp,
4755 struct hwrm_port_phy_cfg_input *req)
4756{
4757 struct ethtool_eee *eee = &bp->eee;
4758
4759 if (eee->eee_enabled) {
4760 u16 eee_speeds;
4761 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
4762
4763 if (eee->tx_lpi_enabled)
4764 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
4765 else
4766 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
4767
4768 req->flags |= cpu_to_le32(flags);
4769 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
4770 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
4771 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
4772 } else {
4773 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
4774 }
4775}
4776
4777int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
4632{ 4778{
4633 struct hwrm_port_phy_cfg_input req = {0}; 4779 struct hwrm_port_phy_cfg_input req = {0};
4634 4780
@@ -4637,14 +4783,42 @@ int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause)
4637 bnxt_hwrm_set_pause_common(bp, &req); 4783 bnxt_hwrm_set_pause_common(bp, &req);
4638 4784
4639 bnxt_hwrm_set_link_common(bp, &req); 4785 bnxt_hwrm_set_link_common(bp, &req);
4786
4787 if (set_eee)
4788 bnxt_hwrm_set_eee(bp, &req);
4640 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4789 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4641} 4790}
4642 4791
4792static bool bnxt_eee_config_ok(struct bnxt *bp)
4793{
4794 struct ethtool_eee *eee = &bp->eee;
4795 struct bnxt_link_info *link_info = &bp->link_info;
4796
4797 if (!(bp->flags & BNXT_FLAG_EEE_CAP))
4798 return true;
4799
4800 if (eee->eee_enabled) {
4801 u32 advertising =
4802 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
4803
4804 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
4805 eee->eee_enabled = 0;
4806 return false;
4807 }
4808 if (eee->advertised & ~advertising) {
4809 eee->advertised = advertising & eee->supported;
4810 return false;
4811 }
4812 }
4813 return true;
4814}
4815
4643static int bnxt_update_phy_setting(struct bnxt *bp) 4816static int bnxt_update_phy_setting(struct bnxt *bp)
4644{ 4817{
4645 int rc; 4818 int rc;
4646 bool update_link = false; 4819 bool update_link = false;
4647 bool update_pause = false; 4820 bool update_pause = false;
4821 bool update_eee = false;
4648 struct bnxt_link_info *link_info = &bp->link_info; 4822 struct bnxt_link_info *link_info = &bp->link_info;
4649 4823
4650 rc = bnxt_update_link(bp, true); 4824 rc = bnxt_update_link(bp, true);
@@ -4654,7 +4828,8 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
4654 return rc; 4828 return rc;
4655 } 4829 }
4656 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 4830 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
4657 link_info->auto_pause_setting != link_info->req_flow_ctrl) 4831 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
4832 link_info->req_flow_ctrl)
4658 update_pause = true; 4833 update_pause = true;
4659 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 4834 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
4660 link_info->force_pause_setting != link_info->req_flow_ctrl) 4835 link_info->force_pause_setting != link_info->req_flow_ctrl)
@@ -4673,8 +4848,11 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
4673 update_link = true; 4848 update_link = true;
4674 } 4849 }
4675 4850
4851 if (!bnxt_eee_config_ok(bp))
4852 update_eee = true;
4853
4676 if (update_link) 4854 if (update_link)
4677 rc = bnxt_hwrm_set_link_setting(bp, update_pause); 4855 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
4678 else if (update_pause) 4856 else if (update_pause)
4679 rc = bnxt_hwrm_set_pause(bp); 4857 rc = bnxt_hwrm_set_pause(bp);
4680 if (rc) { 4858 if (rc) {
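bnxt_eee_config_ok() enforces two constraints before the EEE settings are applied: EEE requires speed autoneg, and the EEE advertisement must be a subset of the regular autoneg advertisement, otherwise it is trimmed and the link is reprogrammed. A sketch of the check under those assumptions:

#include <stdio.h>

/* Returns 1 when the EEE request is consistent; 0 when it had to be
 * disabled or trimmed and the link settings need to be re-applied. */
static int eee_config_ok(int autoneg_speed, unsigned int *eee_adv,
			 unsigned int autoneg_adv, unsigned int eee_supported)
{
	if (!autoneg_speed)
		return 0;                               /* EEE needs autoneg */
	if (*eee_adv & ~autoneg_adv) {
		*eee_adv = autoneg_adv & eee_supported; /* trim to a subset */
		return 0;
	}
	return 1;
}

int main(void)
{
	unsigned int eee_adv = 0x6;     /* advertises more than autoneg (0x2) */

	printf("ok=%d, trimmed adv=0x%x\n",
	       eee_config_ok(1, &eee_adv, 0x2, 0x7), eee_adv);
	return 0;
}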
@@ -5299,6 +5477,28 @@ bnxt_restart_timer:
5299 mod_timer(&bp->timer, jiffies + bp->current_interval); 5477 mod_timer(&bp->timer, jiffies + bp->current_interval);
5300} 5478}
5301 5479
5480static void bnxt_port_module_event(struct bnxt *bp)
5481{
5482 struct bnxt_link_info *link_info = &bp->link_info;
5483 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
5484
5485 if (bnxt_update_link(bp, true))
5486 return;
5487
5488 if (link_info->last_port_module_event != 0) {
5489 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
5490 bp->pf.port_id);
5491 if (bp->hwrm_spec_code >= 0x10201) {
5492 netdev_warn(bp->dev, "Module part number %s\n",
5493 resp->phy_vendor_partnumber);
5494 }
5495 }
5496 if (link_info->last_port_module_event == 1)
5497 netdev_warn(bp->dev, "TX is disabled\n");
5498 if (link_info->last_port_module_event == 3)
5499 netdev_warn(bp->dev, "Shutdown SFP+ module\n");
5500}
5501
5302static void bnxt_cfg_ntp_filters(struct bnxt *); 5502static void bnxt_cfg_ntp_filters(struct bnxt *);
5303 5503
5304static void bnxt_sp_task(struct work_struct *work) 5504static void bnxt_sp_task(struct work_struct *work)
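bnxt_port_module_event() turns the enforcement policy delivered with the PORT_CONN_NOT_ALLOWED event into log messages: any non-zero policy means an unqualified SFP+ module, 1 additionally reports that TX was disabled, and 3 that the module was shut down. The mapping, reduced to a standalone function:

#include <stdio.h>

static void report_module_event(int policy)
{
	if (policy != 0)
		printf("warning: unqualified SFP+ module detected\n");
	if (policy == 1)
		printf("warning: TX is disabled\n");
	if (policy == 3)
		printf("warning: SFP+ module shut down\n");
}

int main(void)
{
	report_module_event(3);
	return 0;
}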
@@ -5346,6 +5546,9 @@ static void bnxt_sp_task(struct work_struct *work)
5346 rtnl_unlock(); 5546 rtnl_unlock();
5347 } 5547 }
5348 5548
5549 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event))
5550 bnxt_port_module_event(bp);
5551
5349 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) 5552 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
5350 bnxt_hwrm_port_qstats(bp); 5553 bnxt_hwrm_port_qstats(bp);
5351 5554
@@ -5814,6 +6017,13 @@ static int bnxt_probe_phy(struct bnxt *bp)
5814 int rc = 0; 6017 int rc = 0;
5815 struct bnxt_link_info *link_info = &bp->link_info; 6018 struct bnxt_link_info *link_info = &bp->link_info;
5816 6019
6020 rc = bnxt_hwrm_phy_qcaps(bp);
6021 if (rc) {
6022 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
6023 rc);
6024 return rc;
6025 }
6026
5817 rc = bnxt_update_link(bp, false); 6027 rc = bnxt_update_link(bp, false);
5818 if (rc) { 6028 if (rc) {
5819 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n", 6029 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
@@ -5823,15 +6033,24 @@ static int bnxt_probe_phy(struct bnxt *bp)
5823 6033
5824 /*initialize the ethool setting copy with NVM settings */ 6034 /*initialize the ethool setting copy with NVM settings */
5825 if (BNXT_AUTO_MODE(link_info->auto_mode)) { 6035 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
5826 link_info->autoneg = BNXT_AUTONEG_SPEED | 6036 link_info->autoneg = BNXT_AUTONEG_SPEED;
5827 BNXT_AUTONEG_FLOW_CTRL; 6037 if (bp->hwrm_spec_code >= 0x10201) {
6038 if (link_info->auto_pause_setting &
6039 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
6040 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
6041 } else {
6042 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
6043 }
5828 link_info->advertising = link_info->auto_link_speeds; 6044 link_info->advertising = link_info->auto_link_speeds;
5829 link_info->req_flow_ctrl = link_info->auto_pause_setting;
5830 } else { 6045 } else {
5831 link_info->req_link_speed = link_info->force_link_speed; 6046 link_info->req_link_speed = link_info->force_link_speed;
5832 link_info->req_duplex = link_info->duplex_setting; 6047 link_info->req_duplex = link_info->duplex_setting;
5833 link_info->req_flow_ctrl = link_info->force_pause_setting;
5834 } 6048 }
6049 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
6050 link_info->req_flow_ctrl =
6051 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
6052 else
6053 link_info->req_flow_ctrl = link_info->force_pause_setting;
5835 return rc; 6054 return rc;
5836} 6055}
5837 6056
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index ec04c47172b7..cc8e38a9f684 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1,6 +1,6 @@
1/* Broadcom NetXtreme-C/E network driver. 1/* Broadcom NetXtreme-C/E network driver.
2 * 2 *
3 * Copyright (c) 2014-2015 Broadcom Corporation 3 * Copyright (c) 2014-2016 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -11,7 +11,7 @@
11#define BNXT_H 11#define BNXT_H
12 12
13#define DRV_MODULE_NAME "bnxt_en" 13#define DRV_MODULE_NAME "bnxt_en"
14#define DRV_MODULE_VERSION "1.0.0" 14#define DRV_MODULE_VERSION "1.2.0"
15 15
16#define DRV_VER_MAJ 1 16#define DRV_VER_MAJ 1
17#define DRV_VER_MIN 0 17#define DRV_VER_MIN 0
@@ -477,6 +477,7 @@ struct rx_tpa_end_cmp_ext {
477#define RING_CMP(idx) ((idx) & bp->cp_ring_mask) 477#define RING_CMP(idx) ((idx) & bp->cp_ring_mask)
478#define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1)) 478#define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1))
479 479
480#define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len)
480#define DFLT_HWRM_CMD_TIMEOUT 500 481#define DFLT_HWRM_CMD_TIMEOUT 500
481#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout) 482#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout)
482#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4) 483#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
@@ -787,7 +788,7 @@ struct bnxt_link_info {
787#define BNXT_LINK_AUTO_ALLSPDS PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS 788#define BNXT_LINK_AUTO_ALLSPDS PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS
788#define BNXT_LINK_AUTO_ONESPD PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED 789#define BNXT_LINK_AUTO_ONESPD PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED
789#define BNXT_LINK_AUTO_ONEORBELOW PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW 790#define BNXT_LINK_AUTO_ONEORBELOW PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW
790#define BNXT_LINK_AUTO_MSK PORT_PHY_QCFG_RESP_AUTO_MODE_MASK 791#define BNXT_LINK_AUTO_MSK PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK
791#define PHY_VER_LEN 3 792#define PHY_VER_LEN 3
792 u8 phy_ver[PHY_VER_LEN]; 793 u8 phy_ver[PHY_VER_LEN];
793 u16 link_speed; 794 u16 link_speed;
@@ -812,7 +813,6 @@ struct bnxt_link_info {
812#define BNXT_LINK_SPEED_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 813#define BNXT_LINK_SPEED_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB
813#define BNXT_LINK_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 814#define BNXT_LINK_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB
814 u16 lp_auto_link_speeds; 815 u16 lp_auto_link_speeds;
815 u16 auto_link_speed;
816 u16 force_link_speed; 816 u16 force_link_speed;
817 u32 preemphasis; 817 u32 preemphasis;
818 818
@@ -825,6 +825,8 @@ struct bnxt_link_info {
825 u16 req_link_speed; 825 u16 req_link_speed;
826 u32 advertising; 826 u32 advertising;
827 bool force_link_chng; 827 bool force_link_chng;
828
829 u8 last_port_module_event;
828 /* a copy of phy_qcfg output used to report link 830 /* a copy of phy_qcfg output used to report link
829 * info to VF 831 * info to VF
830 */ 832 */
@@ -874,6 +876,7 @@ struct bnxt {
874 #define BNXT_FLAG_RFS 0x100 876 #define BNXT_FLAG_RFS 0x100
875 #define BNXT_FLAG_SHARED_RINGS 0x200 877 #define BNXT_FLAG_SHARED_RINGS 0x200
876 #define BNXT_FLAG_PORT_STATS 0x400 878 #define BNXT_FLAG_PORT_STATS 0x400
879 #define BNXT_FLAG_EEE_CAP 0x1000
877 880
878 #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \ 881 #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
879 BNXT_FLAG_RFS | \ 882 BNXT_FLAG_RFS | \
@@ -939,6 +942,7 @@ struct bnxt {
939 942
940 u32 msg_enable; 943 u32 msg_enable;
941 944
945 u32 hwrm_spec_code;
942 u16 hwrm_cmd_seq; 946 u16 hwrm_cmd_seq;
943 u32 hwrm_intr_seq_id; 947 u32 hwrm_intr_seq_id;
944 void *hwrm_cmd_resp_addr; 948 void *hwrm_cmd_resp_addr;
@@ -953,6 +957,7 @@ struct bnxt {
953 dma_addr_t hw_tx_port_stats_map; 957 dma_addr_t hw_tx_port_stats_map;
954 int hw_port_stats_size; 958 int hw_port_stats_size;
955 959
960 u16 hwrm_max_req_len;
956 int hwrm_cmd_timeout; 961 int hwrm_cmd_timeout;
957 struct mutex hwrm_cmd_lock; /* serialize hwrm messages */ 962 struct mutex hwrm_cmd_lock; /* serialize hwrm messages */
958 struct hwrm_ver_get_output ver_resp; 963 struct hwrm_ver_get_output ver_resp;
@@ -989,6 +994,7 @@ struct bnxt {
989#define BNXT_RST_RING_SP_EVENT 7 994#define BNXT_RST_RING_SP_EVENT 7
990#define BNXT_HWRM_PF_UNLOAD_SP_EVENT 8 995#define BNXT_HWRM_PF_UNLOAD_SP_EVENT 8
991#define BNXT_PERIODIC_STATS_SP_EVENT 9 996#define BNXT_PERIODIC_STATS_SP_EVENT 9
997#define BNXT_HWRM_PORT_MODULE_SP_EVENT 10
992 998
993 struct bnxt_pf_info pf; 999 struct bnxt_pf_info pf;
994#ifdef CONFIG_BNXT_SRIOV 1000#ifdef CONFIG_BNXT_SRIOV
@@ -1009,6 +1015,9 @@ struct bnxt {
1009 int ntp_fltr_count; 1015 int ntp_fltr_count;
1010 1016
1011 struct bnxt_link_info link_info; 1017 struct bnxt_link_info link_info;
1018 struct ethtool_eee eee;
1019 u32 lpi_tmr_lo;
1020 u32 lpi_tmr_hi;
1012}; 1021};
1013 1022
1014#ifdef CONFIG_NET_RX_BUSY_POLL 1023#ifdef CONFIG_NET_RX_BUSY_POLL
@@ -1106,7 +1115,7 @@ int hwrm_send_message_silent(struct bnxt *, void *, u32, int);
1106int bnxt_hwrm_set_coal(struct bnxt *); 1115int bnxt_hwrm_set_coal(struct bnxt *);
1107int bnxt_hwrm_func_qcaps(struct bnxt *); 1116int bnxt_hwrm_func_qcaps(struct bnxt *);
1108int bnxt_hwrm_set_pause(struct bnxt *); 1117int bnxt_hwrm_set_pause(struct bnxt *);
1109int bnxt_hwrm_set_link_setting(struct bnxt *, bool); 1118int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
1110int bnxt_open_nic(struct bnxt *, bool, bool); 1119int bnxt_open_nic(struct bnxt *, bool, bool);
1111int bnxt_close_nic(struct bnxt *, bool, bool); 1120int bnxt_close_nic(struct bnxt *, bool, bool);
1112int bnxt_get_max_rings(struct bnxt *, int *, int *, bool); 1121int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 9ada1662b651..a2e93241b06b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1,6 +1,6 @@
1/* Broadcom NetXtreme-C/E network driver. 1/* Broadcom NetXtreme-C/E network driver.
2 * 2 *
3 * Copyright (c) 2014-2015 Broadcom Corporation 3 * Copyright (c) 2014-2016 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -597,7 +597,7 @@ static void bnxt_get_drvinfo(struct net_device *dev,
597 kfree(pkglog); 597 kfree(pkglog);
598} 598}
599 599
600static u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause) 600u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
601{ 601{
602 u32 speed_mask = 0; 602 u32 speed_mask = 0;
603 603
@@ -698,10 +698,23 @@ static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
698 if (link_info->phy_link_status == BNXT_LINK_LINK) 698 if (link_info->phy_link_status == BNXT_LINK_LINK)
699 cmd->lp_advertising = 699 cmd->lp_advertising =
700 bnxt_fw_to_ethtool_lp_adv(link_info); 700 bnxt_fw_to_ethtool_lp_adv(link_info);
701 ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
702 if (!netif_carrier_ok(dev))
703 cmd->duplex = DUPLEX_UNKNOWN;
704 else if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
705 cmd->duplex = DUPLEX_FULL;
706 else
707 cmd->duplex = DUPLEX_HALF;
701 } else { 708 } else {
702 cmd->autoneg = AUTONEG_DISABLE; 709 cmd->autoneg = AUTONEG_DISABLE;
703 cmd->advertising = 0; 710 cmd->advertising = 0;
711 ethtool_speed =
712 bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
713 cmd->duplex = DUPLEX_HALF;
714 if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL)
715 cmd->duplex = DUPLEX_FULL;
704 } 716 }
717 ethtool_cmd_speed_set(cmd, ethtool_speed);
705 718
706 cmd->port = PORT_NONE; 719 cmd->port = PORT_NONE;
707 if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) { 720 if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
@@ -719,16 +732,8 @@ static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
719 cmd->port = PORT_FIBRE; 732 cmd->port = PORT_FIBRE;
720 } 733 }
721 734
722 if (link_info->phy_link_status == BNXT_LINK_LINK) {
723 if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
724 cmd->duplex = DUPLEX_FULL;
725 } else {
726 cmd->duplex = DUPLEX_UNKNOWN;
727 }
728 ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
729 ethtool_cmd_speed_set(cmd, ethtool_speed);
730 if (link_info->transceiver == 735 if (link_info->transceiver ==
731 PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_INTERNAL) 736 PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL)
732 cmd->transceiver = XCVR_INTERNAL; 737 cmd->transceiver = XCVR_INTERNAL;
733 else 738 else
734 cmd->transceiver = XCVR_EXTERNAL; 739 cmd->transceiver = XCVR_EXTERNAL;
@@ -739,31 +744,52 @@ static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
739 744
740static u32 bnxt_get_fw_speed(struct net_device *dev, u16 ethtool_speed) 745static u32 bnxt_get_fw_speed(struct net_device *dev, u16 ethtool_speed)
741{ 746{
747 struct bnxt *bp = netdev_priv(dev);
748 struct bnxt_link_info *link_info = &bp->link_info;
749 u16 support_spds = link_info->support_speeds;
750 u32 fw_speed = 0;
751
742 switch (ethtool_speed) { 752 switch (ethtool_speed) {
743 case SPEED_100: 753 case SPEED_100:
744 return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB; 754 if (support_spds & BNXT_LINK_SPEED_MSK_100MB)
755 fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB;
756 break;
745 case SPEED_1000: 757 case SPEED_1000:
746 return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB; 758 if (support_spds & BNXT_LINK_SPEED_MSK_1GB)
759 fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB;
760 break;
747 case SPEED_2500: 761 case SPEED_2500:
748 return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB; 762 if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB)
763 fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB;
764 break;
749 case SPEED_10000: 765 case SPEED_10000:
750 return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB; 766 if (support_spds & BNXT_LINK_SPEED_MSK_10GB)
767 fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB;
768 break;
751 case SPEED_20000: 769 case SPEED_20000:
752 return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB; 770 if (support_spds & BNXT_LINK_SPEED_MSK_20GB)
771 fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB;
772 break;
753 case SPEED_25000: 773 case SPEED_25000:
754 return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB; 774 if (support_spds & BNXT_LINK_SPEED_MSK_25GB)
775 fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB;
776 break;
755 case SPEED_40000: 777 case SPEED_40000:
756 return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB; 778 if (support_spds & BNXT_LINK_SPEED_MSK_40GB)
779 fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB;
780 break;
757 case SPEED_50000: 781 case SPEED_50000:
758 return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB; 782 if (support_spds & BNXT_LINK_SPEED_MSK_50GB)
783 fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB;
784 break;
759 default: 785 default:
760 netdev_err(dev, "unsupported speed!\n"); 786 netdev_err(dev, "unsupported speed!\n");
761 break; 787 break;
762 } 788 }
763 return 0; 789 return fw_speed;
764} 790}
765 791
766static u16 bnxt_get_fw_auto_link_speeds(u32 advertising) 792u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
767{ 793{
768 u16 fw_speed_mask = 0; 794 u16 fw_speed_mask = 0;
769 795
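bnxt_get_fw_speed() no longer translates an ethtool speed blindly: the firmware speed code is returned only when the corresponding bit is set in the port's supported-speed mask, and a zero return makes bnxt_set_settings() fail with -EINVAL. A condensed version covering two speeds (the mask bits and firmware codes here are illustrative, not the real constants):

#include <stdio.h>

/* Illustrative mask bits and firmware speed codes, not the real constants. */
#define SPD_MSK_1GB   0x01u
#define SPD_MSK_10GB  0x02u
#define FW_SPEED_1GB  0x0au
#define FW_SPEED_10GB 0x64u

static unsigned int get_fw_speed(unsigned int support_msk, unsigned int speed)
{
	switch (speed) {
	case 1000:
		return (support_msk & SPD_MSK_1GB) ? FW_SPEED_1GB : 0;
	case 10000:
		return (support_msk & SPD_MSK_10GB) ? FW_SPEED_10GB : 0;
	default:
		return 0;                /* unknown/unsupported -> -EINVAL */
	}
}

int main(void)
{
	printf("10G on a 1G-only port -> 0x%x\n",
	       get_fw_speed(SPD_MSK_1GB, 10000));
	return 0;
}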
@@ -823,6 +849,8 @@ static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
823 */ 849 */
824 set_pause = true; 850 set_pause = true;
825 } else { 851 } else {
852 u16 fw_speed;
853
826 /* TODO: currently don't support half duplex */ 854 /* TODO: currently don't support half duplex */
827 if (cmd->duplex == DUPLEX_HALF) { 855 if (cmd->duplex == DUPLEX_HALF) {
828 netdev_err(dev, "HALF DUPLEX is not supported!\n"); 856 netdev_err(dev, "HALF DUPLEX is not supported!\n");
@@ -833,14 +861,19 @@ static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
833 if (cmd->duplex == DUPLEX_UNKNOWN) 861 if (cmd->duplex == DUPLEX_UNKNOWN)
834 cmd->duplex = DUPLEX_FULL; 862 cmd->duplex = DUPLEX_FULL;
835 speed = ethtool_cmd_speed(cmd); 863 speed = ethtool_cmd_speed(cmd);
836 link_info->req_link_speed = bnxt_get_fw_speed(dev, speed); 864 fw_speed = bnxt_get_fw_speed(dev, speed);
865 if (!fw_speed) {
866 rc = -EINVAL;
867 goto set_setting_exit;
868 }
869 link_info->req_link_speed = fw_speed;
837 link_info->req_duplex = BNXT_LINK_DUPLEX_FULL; 870 link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
838 link_info->autoneg = 0; 871 link_info->autoneg = 0;
839 link_info->advertising = 0; 872 link_info->advertising = 0;
840 } 873 }
841 874
842 if (netif_running(dev)) 875 if (netif_running(dev))
843 rc = bnxt_hwrm_set_link_setting(bp, set_pause); 876 rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);
844 877
845set_setting_exit: 878set_setting_exit:
846 return rc; 879 return rc;
@@ -855,10 +888,8 @@ static void bnxt_get_pauseparam(struct net_device *dev,
855 if (BNXT_VF(bp)) 888 if (BNXT_VF(bp))
856 return; 889 return;
857 epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL); 890 epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL);
858 epause->rx_pause = 891 epause->rx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX);
859 ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_RX) != 0); 892 epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX);
860 epause->tx_pause =
861 ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_TX) != 0);
862} 893}
863 894
864static int bnxt_set_pauseparam(struct net_device *dev, 895static int bnxt_set_pauseparam(struct net_device *dev,
@@ -876,7 +907,9 @@ static int bnxt_set_pauseparam(struct net_device *dev,
876 return -EINVAL; 907 return -EINVAL;
877 908
878 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 909 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
879 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_BOTH; 910 if (bp->hwrm_spec_code >= 0x10201)
911 link_info->req_flow_ctrl =
912 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
880 } else { 913 } else {
881 /* when transition from auto pause to force pause, 914 /* when transition from auto pause to force pause,
882 * force a link change 915 * force a link change
@@ -884,17 +917,13 @@ static int bnxt_set_pauseparam(struct net_device *dev,
884 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) 917 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
885 link_info->force_link_chng = true; 918 link_info->force_link_chng = true;
886 link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL; 919 link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL;
887 link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_BOTH; 920 link_info->req_flow_ctrl = 0;
888 } 921 }
889 if (epause->rx_pause) 922 if (epause->rx_pause)
890 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX; 923 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX;
891 else
892 link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_RX;
893 924
894 if (epause->tx_pause) 925 if (epause->tx_pause)
895 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX; 926 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;
896 else
897 link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_TX;
898 927
899 if (netif_running(dev)) 928 if (netif_running(dev))
900 rc = bnxt_hwrm_set_pause(bp); 929 rc = bnxt_hwrm_set_pause(bp);
@@ -1383,6 +1412,80 @@ static int bnxt_set_eeprom(struct net_device *dev,
1383 eeprom->len); 1412 eeprom->len);
1384} 1413}
1385 1414
1415static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
1416{
1417 struct bnxt *bp = netdev_priv(dev);
1418 struct ethtool_eee *eee = &bp->eee;
1419 struct bnxt_link_info *link_info = &bp->link_info;
1420 u32 advertising =
1421 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
1422 int rc = 0;
1423
1424 if (BNXT_VF(bp))
1425 return 0;
1426
1427 if (!(bp->flags & BNXT_FLAG_EEE_CAP))
1428 return -EOPNOTSUPP;
1429
1430 if (!edata->eee_enabled)
1431 goto eee_ok;
1432
1433 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
1434 netdev_warn(dev, "EEE requires autoneg\n");
1435 return -EINVAL;
1436 }
1437 if (edata->tx_lpi_enabled) {
1438 if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi ||
1439 edata->tx_lpi_timer < bp->lpi_tmr_lo)) {
1440 netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n",
1441 bp->lpi_tmr_lo, bp->lpi_tmr_hi);
1442 return -EINVAL;
1443 } else if (!bp->lpi_tmr_hi) {
1444 edata->tx_lpi_timer = eee->tx_lpi_timer;
1445 }
1446 }
1447 if (!edata->advertised) {
1448 edata->advertised = advertising & eee->supported;
1449 } else if (edata->advertised & ~advertising) {
1450 netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n",
1451 edata->advertised, advertising);
1452 return -EINVAL;
1453 }
1454
1455 eee->advertised = edata->advertised;
1456 eee->tx_lpi_enabled = edata->tx_lpi_enabled;
1457 eee->tx_lpi_timer = edata->tx_lpi_timer;
1458eee_ok:
1459 eee->eee_enabled = edata->eee_enabled;
1460
1461 if (netif_running(dev))
1462 rc = bnxt_hwrm_set_link_setting(bp, false, true);
1463
1464 return rc;
1465}
1466
1467static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
1468{
1469 struct bnxt *bp = netdev_priv(dev);
1470
1471 if (!(bp->flags & BNXT_FLAG_EEE_CAP))
1472 return -EOPNOTSUPP;
1473
1474 *edata = bp->eee;
1475 if (!bp->eee.eee_enabled) {
1476 /* Preserve tx_lpi_timer so that the last value will be used
1477 * by default when it is re-enabled.
1478 */
1479 edata->advertised = 0;
1480 edata->tx_lpi_enabled = 0;
1481 }
1482
1483 if (!bp->eee.eee_active)
1484 edata->lp_advertised = 0;
1485
1486 return 0;
1487}
1488
1386const struct ethtool_ops bnxt_ethtool_ops = { 1489const struct ethtool_ops bnxt_ethtool_ops = {
1387 .get_settings = bnxt_get_settings, 1490 .get_settings = bnxt_get_settings,
1388 .set_settings = bnxt_set_settings, 1491 .set_settings = bnxt_set_settings,
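bnxt_set_eee() validates the requested LPI timer against the window reported by PORT_PHY_QCAPS: if the firmware advertised a non-zero upper bound, values outside [lpi_tmr_lo, lpi_tmr_hi] are rejected, and if it advertised none, the previously stored timer is kept. The check in isolation:

#include <stdio.h>

/* Returns 0 when the timer request is usable, -1 when it must be
 * rejected (the driver returns -EINVAL in that case). */
static int check_lpi_timer(unsigned int lo, unsigned int hi,
			   unsigned int *req, unsigned int current_val)
{
	if (hi && (*req > hi || *req < lo))
		return -1;               /* outside the advertised window */
	if (!hi)
		*req = current_val;      /* no window: keep the stored timer */
	return 0;
}

int main(void)
{
	unsigned int req = 50;

	printf("rc=%d\n", check_lpi_timer(100, 1000, &req, 256));
	return 0;
}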
@@ -1411,4 +1514,6 @@ const struct ethtool_ops bnxt_ethtool_ops = {
1411 .get_eeprom = bnxt_get_eeprom, 1514 .get_eeprom = bnxt_get_eeprom,
1412 .set_eeprom = bnxt_set_eeprom, 1515 .set_eeprom = bnxt_set_eeprom,
1413 .get_link = bnxt_get_link, 1516 .get_link = bnxt_get_link,
1517 .get_eee = bnxt_get_eee,
1518 .set_eee = bnxt_set_eee,
1414}; 1519};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index 98fa81e08b58..3abc03b60dbc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -1,6 +1,6 @@
1/* Broadcom NetXtreme-C/E network driver. 1/* Broadcom NetXtreme-C/E network driver.
2 * 2 *
3 * Copyright (c) 2014-2015 Broadcom Corporation 3 * Copyright (c) 2014-2016 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -12,6 +12,8 @@
12 12
13extern const struct ethtool_ops bnxt_ethtool_ops; 13extern const struct ethtool_ops bnxt_ethtool_ops;
14 14
15u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8);
15u32 bnxt_fw_to_ethtool_speed(u16); 16u32 bnxt_fw_to_ethtool_speed(u16);
17u16 bnxt_get_fw_auto_link_speeds(u32);
16 18
17#endif 19#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
index e0aac65c6d82..461675caaacd 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
@@ -1,6 +1,6 @@
1/* Broadcom NetXtreme-C/E network driver. 1/* Broadcom NetXtreme-C/E network driver.
2 * 2 *
3 * Copyright (c) 2014-2015 Broadcom Corporation 3 * Copyright (c) 2014-2016 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 4badbedcb421..80f95560086d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -1,6 +1,6 @@
1/* Broadcom NetXtreme-C/E network driver. 1/* Broadcom NetXtreme-C/E network driver.
2 * 2 *
3 * Copyright (c) 2014-2015 Broadcom Corporation 3 * Copyright (c) 2014-2016 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -104,6 +104,7 @@ struct hwrm_async_event_cmpl {
104 #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE (0x3UL << 0) 104 #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE (0x3UL << 0)
105 #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0) 105 #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0)
106 #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED (0x5UL << 0) 106 #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED (0x5UL << 0)
107 #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE (0x6UL << 0)
107 #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD (0x10UL << 0) 108 #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD (0x10UL << 0)
108 #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD (0x11UL << 0) 109 #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD (0x11UL << 0)
109 #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD (0x20UL << 0) 110 #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD (0x20UL << 0)
@@ -111,6 +112,7 @@ struct hwrm_async_event_cmpl {
111 #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR (0x30UL << 0) 112 #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR (0x30UL << 0)
112 #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE (0x31UL << 0) 113 #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE (0x31UL << 0)
113 #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE (0x32UL << 0) 114 #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE (0x32UL << 0)
115 #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE (0x33UL << 0)
114 #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR (0xffUL << 0) 116 #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR (0xffUL << 0)
115 __le32 event_data2; 117 __le32 event_data2;
116 u8 opaque_v; 118 u8 opaque_v;
@@ -141,6 +143,7 @@ struct hwrm_async_event_cmpl_link_status_change {
141 #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL 143 #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL
142 #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN (0x0UL << 0) 144 #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN (0x0UL << 0)
143 #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP (0x1UL << 0) 145 #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP (0x1UL << 0)
146 #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP
144 #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL 147 #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL
145 #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1 148 #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1
146 #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL 149 #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL
@@ -195,6 +198,9 @@ struct hwrm_async_event_cmpl_link_speed_change {
195 #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_25GB (0xfaUL << 1) 198 #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_25GB (0xfaUL << 1)
196 #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB (0x190UL << 1) 199 #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB (0x190UL << 1)
197 #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB (0x1f4UL << 1) 200 #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB (0x1f4UL << 1)
201 #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB (0x3e8UL << 1)
202 #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10MB (0xffffUL << 1)
203 #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_LAST HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10MB
198 #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0000UL 204 #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0000UL
199 #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT 16 205 #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT 16
200}; 206};
@@ -237,6 +243,55 @@ struct hwrm_async_event_cmpl_port_conn_not_allowed {
237 __le32 event_data1; 243 __le32 event_data1;
238 #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL 244 #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
239 #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0 245 #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
246 #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK 0xff0000UL
247 #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT 16
248 #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE (0x0UL << 16)
249 #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX (0x1UL << 16)
250 #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG (0x2UL << 16)
251 #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN (0x3UL << 16)
252 #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN
253};
254
255/* HWRM Asynchronous Event Completion Record for link speed config not allowed (16 bytes) */
256struct hwrm_async_event_cmpl_link_speed_cfg_not_allowed {
257 __le16 type;
258 #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_MASK 0x3fUL
259 #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_SFT 0
260 #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
261 __le16 event_id;
262 #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED (0x5UL << 0)
263 __le32 event_data2;
264 u8 opaque_v;
265 #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_V 0x1UL
266 #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
267 #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_SFT 1
268 u8 timestamp_lo;
269 __le16 timestamp_hi;
270 __le32 event_data1;
271 #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
272 #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
273};
274
275/* HWRM Asynchronous Event Completion Record for link speed configuration change (16 bytes) */
276struct hwrm_async_event_cmpl_link_speed_cfg_change {
277 __le16 type;
278 #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL
279 #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0
280 #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
281 __le16 event_id;
282 #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE (0x6UL << 0)
283 __le32 event_data2;
284 u8 opaque_v;
285 #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V 0x1UL
286 #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_MASK 0xfeUL
287 #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1
288 u8 timestamp_lo;
289 __le16 timestamp_hi;
290 __le32 event_data1;
291 #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
292 #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
293 #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL
294 #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL
240}; 295};
241 296
242/* HWRM Asynchronous Event Completion Record for Function Driver Unload (16 bytes) */ 297/* HWRM Asynchronous Event Completion Record for Function Driver Unload (16 bytes) */
@@ -363,6 +418,47 @@ struct hwrm_async_event_cmpl_vf_mac_addr_change {
363 #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT 0 418 #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT 0
364}; 419};
365 420
421/* HWRM Asynchronous Event Completion Record for PF-VF communication status change (16 bytes) */
422struct hwrm_async_event_cmpl_pf_vf_comm_status_change {
423 __le16 type;
424 #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_MASK 0x3fUL
425 #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_SFT 0
426 #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
427 __le16 event_id;
428 #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE (0x32UL << 0)
429 __le32 event_data2;
430 u8 opaque_v;
431 #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_V 0x1UL
432 #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
433 #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_SFT 1
434 u8 timestamp_lo;
435 __le16 timestamp_hi;
436 __le32 event_data1;
437 #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_DATA1_COMM_ESTABLISHED 0x1UL
438};
439
440/* HWRM Asynchronous Event Completion Record for VF configuration change (16 bytes) */
441struct hwrm_async_event_cmpl_vf_cfg_change {
442 __le16 type;
443 #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK 0x3fUL
444 #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0
445 #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
446 __le16 event_id;
447 #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE (0x33UL << 0)
448 __le32 event_data2;
449 u8 opaque_v;
450 #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL
451 #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK 0xfeUL
452 #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1
453 u8 timestamp_lo;
454 __le16 timestamp_hi;
455 __le32 event_data1;
456 #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL
457 #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL
458 #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL
459 #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL
460};
461
366/* HWRM Asynchronous Event Completion Record for HWRM Error (16 bytes) */ 462/* HWRM Asynchronous Event Completion Record for HWRM Error (16 bytes) */
367struct hwrm_async_event_cmpl_hwrm_error { 463struct hwrm_async_event_cmpl_hwrm_error {
368 __le16 type; 464 __le16 type;
@@ -377,6 +473,7 @@ struct hwrm_async_event_cmpl_hwrm_error {
377 #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING (0x0UL << 0) 473 #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING (0x0UL << 0)
378 #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL (0x1UL << 0) 474 #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL (0x1UL << 0)
379 #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL (0x2UL << 0) 475 #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL (0x2UL << 0)
476 #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL
380 u8 opaque_v; 477 u8 opaque_v;
381 #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL 478 #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL
382 #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL 479 #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL
@@ -387,12 +484,12 @@ struct hwrm_async_event_cmpl_hwrm_error {
387 #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL 484 #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
388}; 485};
389 486
390/* HW Resource Manager Specification 1.0.0 */ 487/* HW Resource Manager Specification 1.2.2 */
391#define HWRM_VERSION_MAJOR 1 488#define HWRM_VERSION_MAJOR 1
392#define HWRM_VERSION_MINOR 0 489#define HWRM_VERSION_MINOR 2
393#define HWRM_VERSION_UPDATE 0 490#define HWRM_VERSION_UPDATE 2
394 491
395#define HWRM_VERSION_STR "1.0.0" 492#define HWRM_VERSION_STR "1.2.2"
396/* 493/*
397 * Following is the signature for HWRM message field that indicates not 494 * Following is the signature for HWRM message field that indicates not
398 * applicable (All F's). Need to cast it the size of the field if needed. 495 * applicable (All F's). Need to cast it the size of the field if needed.
@@ -444,7 +541,7 @@ struct cmd_nums {
444 #define HWRM_FUNC_BUF_RGTR (0x1fUL) 541 #define HWRM_FUNC_BUF_RGTR (0x1fUL)
445 #define HWRM_PORT_PHY_CFG (0x20UL) 542 #define HWRM_PORT_PHY_CFG (0x20UL)
446 #define HWRM_PORT_MAC_CFG (0x21UL) 543 #define HWRM_PORT_MAC_CFG (0x21UL)
447 #define RESERVED2 (0x22UL) 544 #define HWRM_PORT_TS_QUERY (0x22UL)
448 #define HWRM_PORT_QSTATS (0x23UL) 545 #define HWRM_PORT_QSTATS (0x23UL)
449 #define HWRM_PORT_LPBK_QSTATS (0x24UL) 546 #define HWRM_PORT_LPBK_QSTATS (0x24UL)
450 #define HWRM_PORT_CLR_STATS (0x25UL) 547 #define HWRM_PORT_CLR_STATS (0x25UL)
@@ -452,6 +549,9 @@ struct cmd_nums {
452 #define HWRM_PORT_PHY_QCFG (0x27UL) 549 #define HWRM_PORT_PHY_QCFG (0x27UL)
453 #define HWRM_PORT_MAC_QCFG (0x28UL) 550 #define HWRM_PORT_MAC_QCFG (0x28UL)
454 #define HWRM_PORT_BLINK_LED (0x29UL) 551 #define HWRM_PORT_BLINK_LED (0x29UL)
552 #define HWRM_PORT_PHY_QCAPS (0x2aUL)
553 #define HWRM_PORT_PHY_I2C_WRITE (0x2bUL)
554 #define HWRM_PORT_PHY_I2C_READ (0x2cUL)
455 #define HWRM_QUEUE_QPORTCFG (0x30UL) 555 #define HWRM_QUEUE_QPORTCFG (0x30UL)
456 #define HWRM_QUEUE_QCFG (0x31UL) 556 #define HWRM_QUEUE_QCFG (0x31UL)
457 #define HWRM_QUEUE_CFG (0x32UL) 557 #define HWRM_QUEUE_CFG (0x32UL)
@@ -531,6 +631,7 @@ struct cmd_nums {
531 __le16 unused_0[3]; 631 __le16 unused_0[3];
532}; 632};
533 633
634/* Return Codes (8 bytes) */
534struct ret_codes { 635struct ret_codes {
535 __le16 error_code; 636 __le16 error_code;
536 #define HWRM_ERR_CODE_SUCCESS (0x0UL) 637 #define HWRM_ERR_CODE_SUCCESS (0x0UL)
@@ -875,10 +976,11 @@ struct hwrm_func_vf_cfg_input {
875 #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL 976 #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL
876 #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL 977 #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL
877 #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL 978 #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL
979 #define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x8UL
878 __le16 mtu; 980 __le16 mtu;
879 __le16 guest_vlan; 981 __le16 guest_vlan;
880 __le16 async_event_cr; 982 __le16 async_event_cr;
881 __le16 unused_0[3]; 983 u8 dflt_mac_addr[6];
882}; 984};
883 985
884/* Output (16 bytes) */ 986/* Output (16 bytes) */
@@ -917,7 +1019,8 @@ struct hwrm_func_qcaps_output {
917 __le32 flags; 1019 __le32 flags;
918 #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL 1020 #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL
919 #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL 1021 #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL
920 u8 perm_mac_address[6]; 1022 #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL
1023 u8 mac_address[6];
921 __le16 max_rsscos_ctx; 1024 __le16 max_rsscos_ctx;
922 __le16 max_cmpl_rings; 1025 __le16 max_cmpl_rings;
923 __le16 max_tx_rings; 1026 __le16 max_tx_rings;
@@ -942,6 +1045,67 @@ struct hwrm_func_qcaps_output {
942 u8 valid; 1045 u8 valid;
943}; 1046};
944 1047
1048/* hwrm_func_qcfg */
1049/* Input (24 bytes) */
1050struct hwrm_func_qcfg_input {
1051 __le16 req_type;
1052 __le16 cmpl_ring;
1053 __le16 seq_id;
1054 __le16 target_id;
1055 __le64 resp_addr;
1056 __le16 fid;
1057 __le16 unused_0[3];
1058};
1059
1060/* Output (72 bytes) */
1061struct hwrm_func_qcfg_output {
1062 __le16 error_code;
1063 __le16 req_type;
1064 __le16 seq_id;
1065 __le16 resp_len;
1066 __le16 fid;
1067 __le16 port_id;
1068 __le16 vlan;
1069 u8 unused_0;
1070 u8 unused_1;
1071 u8 mac_address[6];
1072 __le16 pci_id;
1073 __le16 alloc_rsscos_ctx;
1074 __le16 alloc_cmpl_rings;
1075 __le16 alloc_tx_rings;
1076 __le16 alloc_rx_rings;
1077 __le16 alloc_l2_ctx;
1078 __le16 alloc_vnics;
1079 __le16 mtu;
1080 __le16 mru;
1081 __le16 stat_ctx_id;
1082 u8 port_partition_type;
1083 #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF (0x0UL << 0)
1084 #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS (0x1UL << 0)
1085 #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 (0x2UL << 0)
1086 #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 (0x3UL << 0)
1087 #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 (0x4UL << 0)
1088 #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN (0xffUL << 0)
1089 u8 unused_2;
1090 __le16 dflt_vnic_id;
1091 u8 unused_3;
1092 u8 unused_4;
1093 __le32 min_bw;
1094 __le32 max_bw;
1095 u8 evb_mode;
1096 #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB (0x0UL << 0)
1097 #define FUNC_QCFG_RESP_EVB_MODE_VEB (0x1UL << 0)
1098 #define FUNC_QCFG_RESP_EVB_MODE_VEPA (0x2UL << 0)
1099 u8 unused_5;
1100 __le16 unused_6;
1101 __le32 alloc_mcast_filters;
1102 __le32 alloc_hw_ring_grps;
1103 u8 unused_7;
1104 u8 unused_8;
1105 u8 unused_9;
1106 u8 valid;
1107};
1108
945/* hwrm_func_cfg */ 1109/* hwrm_func_cfg */
946/* Input (88 bytes) */ 1110/* Input (88 bytes) */
947struct hwrm_func_cfg_input { 1111struct hwrm_func_cfg_input {
@@ -1171,6 +1335,7 @@ struct hwrm_func_drv_rgtr_input {
1171 #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN (0x0UL << 0) 1335 #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN (0x0UL << 0)
1172 #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER (0x1UL << 0) 1336 #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER (0x1UL << 0)
1173 #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS (0xeUL << 0) 1337 #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS (0xeUL << 0)
1338 #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS (0x12UL << 0)
1174 #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS (0x1dUL << 0) 1339 #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS (0x1dUL << 0)
1175 #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX (0x24UL << 0) 1340 #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX (0x24UL << 0)
1176 #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD (0x2aUL << 0) 1341 #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD (0x2aUL << 0)
@@ -1302,6 +1467,7 @@ struct hwrm_func_drv_qver_output {
1302 #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN (0x0UL << 0) 1467 #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN (0x0UL << 0)
1303 #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER (0x1UL << 0) 1468 #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER (0x1UL << 0)
1304 #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS (0xeUL << 0) 1469 #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS (0xeUL << 0)
1470 #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS (0x12UL << 0)
1305 #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS (0x1dUL << 0) 1471 #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS (0x1dUL << 0)
1306 #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX (0x24UL << 0) 1472 #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX (0x24UL << 0)
1307 #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD (0x2aUL << 0) 1473 #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD (0x2aUL << 0)
@@ -1317,7 +1483,7 @@ struct hwrm_func_drv_qver_output {
1317}; 1483};
1318 1484
1319/* hwrm_port_phy_cfg */ 1485/* hwrm_port_phy_cfg */
1320/* Input (48 bytes) */ 1486/* Input (56 bytes) */
1321struct hwrm_port_phy_cfg_input { 1487struct hwrm_port_phy_cfg_input {
1322 __le16 req_type; 1488 __le16 req_type;
1323 __le16 cmpl_ring; 1489 __le16 cmpl_ring;
@@ -1329,6 +1495,10 @@ struct hwrm_port_phy_cfg_input {
1329 #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DOWN 0x2UL 1495 #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DOWN 0x2UL
1330 #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL 1496 #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
1331 #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL 1497 #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
1498 #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
1499 #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
1500 #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
1501 #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
1332 __le32 enables; 1502 __le32 enables;
1333 #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL 1503 #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
1334 #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL 1504 #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
@@ -1339,6 +1509,8 @@ struct hwrm_port_phy_cfg_input {
1339 #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL 1509 #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL
1340 #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL 1510 #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL
1341 #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL 1511 #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
1512 #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL
1513 #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
1342 __le16 port_id; 1514 __le16 port_id;
1343 __le16 force_link_speed; 1515 __le16 force_link_speed;
1344 #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB (0x1UL << 0) 1516 #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB (0x1UL << 0)
@@ -1350,12 +1522,14 @@ struct hwrm_port_phy_cfg_input {
1350 #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB (0xfaUL << 0) 1522 #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB (0xfaUL << 0)
1351 #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB (0x190UL << 0) 1523 #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB (0x190UL << 0)
1352 #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB (0x1f4UL << 0) 1524 #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB (0x1f4UL << 0)
1525 #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB (0x3e8UL << 0)
1526 #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB (0xffffUL << 0)
1353 u8 auto_mode; 1527 u8 auto_mode;
1354 #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE (0x0UL << 0) 1528 #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE (0x0UL << 0)
1355 #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS (0x1UL << 0) 1529 #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS (0x1UL << 0)
1356 #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED (0x2UL << 0) 1530 #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED (0x2UL << 0)
1357 #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0) 1531 #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0)
1358 #define PORT_PHY_CFG_REQ_AUTO_MODE_MASK (0x4UL << 0) 1532 #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK (0x4UL << 0)
1359 u8 auto_duplex; 1533 u8 auto_duplex;
1360 #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF (0x0UL << 0) 1534 #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF (0x0UL << 0)
1361 #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL (0x1UL << 0) 1535 #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL (0x1UL << 0)
@@ -1363,6 +1537,7 @@ struct hwrm_port_phy_cfg_input {
1363 u8 auto_pause; 1537 u8 auto_pause;
1364 #define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL 1538 #define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL
1365 #define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL 1539 #define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL
1540 #define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
1366 u8 unused_0; 1541 u8 unused_0;
1367 __le16 auto_link_speed; 1542 __le16 auto_link_speed;
1368 #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB (0x1UL << 0) 1543 #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB (0x1UL << 0)
@@ -1374,6 +1549,8 @@ struct hwrm_port_phy_cfg_input {
1374 #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB (0xfaUL << 0) 1549 #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB (0xfaUL << 0)
1375 #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB (0x190UL << 0) 1550 #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB (0x190UL << 0)
1376 #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB (0x1f4UL << 0) 1551 #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB (0x1f4UL << 0)
1552 #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB (0x3e8UL << 0)
1553 #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB (0xffffUL << 0)
1377 __le16 auto_link_speed_mask; 1554 __le16 auto_link_speed_mask;
1378 #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL 1555 #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
1379 #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL 1556 #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL
@@ -1386,6 +1563,9 @@ struct hwrm_port_phy_cfg_input {
1386 #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB 0x100UL 1563 #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB 0x100UL
1387 #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB 0x200UL 1564 #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB 0x200UL
1388 #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB 0x400UL 1565 #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB 0x400UL
1566 #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL
1567 #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
1568 #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
1389 u8 wirespeed; 1569 u8 wirespeed;
1390 #define PORT_PHY_CFG_REQ_WIRESPEED_OFF (0x0UL << 0) 1570 #define PORT_PHY_CFG_REQ_WIRESPEED_OFF (0x0UL << 0)
1391 #define PORT_PHY_CFG_REQ_WIRESPEED_ON (0x1UL << 0) 1571 #define PORT_PHY_CFG_REQ_WIRESPEED_ON (0x1UL << 0)
@@ -1398,7 +1578,20 @@ struct hwrm_port_phy_cfg_input {
1398 #define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL 1578 #define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL
1399 u8 unused_1; 1579 u8 unused_1;
1400 __le32 preemphasis; 1580 __le32 preemphasis;
1401 __le32 unused_2; 1581 __le16 eee_link_speed_mask;
1582 #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
1583 #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_100MB 0x2UL
1584 #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
1585 #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_1GB 0x8UL
1586 #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
1587 #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
1588 #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB 0x40UL
1589 u8 unused_2;
1590 u8 unused_3;
1591 __le32 tx_lpi_timer;
1592 __le32 unused_4;
1593 #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL
1594 #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0
1402}; 1595};
1403 1596
1404/* Output (16 bytes) */ 1597/* Output (16 bytes) */
@@ -1426,7 +1619,7 @@ struct hwrm_port_phy_qcfg_input {
1426 __le16 unused_0[3]; 1619 __le16 unused_0[3];
1427}; 1620};
1428 1621
1429/* Output (48 bytes) */ 1622/* Output (96 bytes) */
1430struct hwrm_port_phy_qcfg_output { 1623struct hwrm_port_phy_qcfg_output {
1431 __le16 error_code; 1624 __le16 error_code;
1432 __le16 req_type; 1625 __le16 req_type;
@@ -1447,6 +1640,8 @@ struct hwrm_port_phy_qcfg_output {
1447 #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB (0xfaUL << 0) 1640 #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB (0xfaUL << 0)
1448 #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB (0x190UL << 0) 1641 #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB (0x190UL << 0)
1449 #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB (0x1f4UL << 0) 1642 #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB (0x1f4UL << 0)
1643 #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB (0x3e8UL << 0)
1644 #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB (0xffffUL << 0)
1450 u8 duplex; 1645 u8 duplex;
1451 #define PORT_PHY_QCFG_RESP_DUPLEX_HALF (0x0UL << 0) 1646 #define PORT_PHY_QCFG_RESP_DUPLEX_HALF (0x0UL << 0)
1452 #define PORT_PHY_QCFG_RESP_DUPLEX_FULL (0x1UL << 0) 1647 #define PORT_PHY_QCFG_RESP_DUPLEX_FULL (0x1UL << 0)
@@ -1465,6 +1660,9 @@ struct hwrm_port_phy_qcfg_output {
1465 #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB 0x100UL 1660 #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB 0x100UL
1466 #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 0x200UL 1661 #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 0x200UL
1467 #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 0x400UL 1662 #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 0x400UL
1663 #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL
1664 #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL
1665 #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL
1468 __le16 force_link_speed; 1666 __le16 force_link_speed;
1469 #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB (0x1UL << 0) 1667 #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB (0x1UL << 0)
1470 #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB (0xaUL << 0) 1668 #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB (0xaUL << 0)
@@ -1475,15 +1673,18 @@ struct hwrm_port_phy_qcfg_output {
1475 #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB (0xfaUL << 0) 1673 #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB (0xfaUL << 0)
1476 #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB (0x190UL << 0) 1674 #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB (0x190UL << 0)
1477 #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB (0x1f4UL << 0) 1675 #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB (0x1f4UL << 0)
1676 #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB (0x3e8UL << 0)
1677 #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB (0xffffUL << 0)
1478 u8 auto_mode; 1678 u8 auto_mode;
1479 #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE (0x0UL << 0) 1679 #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE (0x0UL << 0)
1480 #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS (0x1UL << 0) 1680 #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS (0x1UL << 0)
1481 #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED (0x2UL << 0) 1681 #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED (0x2UL << 0)
1482 #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0) 1682 #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0)
1483 #define PORT_PHY_QCFG_RESP_AUTO_MODE_MASK (0x4UL << 0) 1683 #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK (0x4UL << 0)
1484 u8 auto_pause; 1684 u8 auto_pause;
1485 #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL 1685 #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL
1486 #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL 1686 #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL
1687 #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
1487 __le16 auto_link_speed; 1688 __le16 auto_link_speed;
1488 #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB (0x1UL << 0) 1689 #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB (0x1UL << 0)
1489 #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB (0xaUL << 0) 1690 #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB (0xaUL << 0)
@@ -1494,6 +1695,8 @@ struct hwrm_port_phy_qcfg_output {
1494 #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB (0xfaUL << 0) 1695 #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB (0xfaUL << 0)
1495 #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB (0x190UL << 0) 1696 #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB (0x190UL << 0)
1496 #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB (0x1f4UL << 0) 1697 #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB (0x1f4UL << 0)
1698 #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB (0x3e8UL << 0)
1699 #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB (0xffffUL << 0)
1497 __le16 auto_link_speed_mask; 1700 __le16 auto_link_speed_mask;
1498 #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL 1701 #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
1499 #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL 1702 #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL
@@ -1506,6 +1709,9 @@ struct hwrm_port_phy_qcfg_output {
1506 #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB 0x100UL 1709 #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB 0x100UL
1507 #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB 0x200UL 1710 #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB 0x200UL
1508 #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB 0x400UL 1711 #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB 0x400UL
1712 #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL
1713 #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
1714 #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
1509 u8 wirespeed; 1715 u8 wirespeed;
1510 #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF (0x0UL << 0) 1716 #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF (0x0UL << 0)
1511 #define PORT_PHY_QCFG_RESP_WIRESPEED_ON (0x1UL << 0) 1717 #define PORT_PHY_QCFG_RESP_WIRESPEED_ON (0x1UL << 0)
@@ -1516,31 +1722,49 @@ struct hwrm_port_phy_qcfg_output {
1516 u8 force_pause; 1722 u8 force_pause;
1517 #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL 1723 #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL
1518 #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL 1724 #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL
1519 u8 reserved1; 1725 u8 module_status;
1726 #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE (0x0UL << 0)
1727 #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX (0x1UL << 0)
1728 #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG (0x2UL << 0)
1729 #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN (0x3UL << 0)
1730 #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED (0x4UL << 0)
1731 #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE (0xffUL << 0)
1520 __le32 preemphasis; 1732 __le32 preemphasis;
1521 u8 phy_maj; 1733 u8 phy_maj;
1522 u8 phy_min; 1734 u8 phy_min;
1523 u8 phy_bld; 1735 u8 phy_bld;
1524 u8 phy_type; 1736 u8 phy_type;
1525 #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR4 (0x1UL << 0) 1737 #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN (0x0UL << 0)
1738 #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR (0x1UL << 0)
1526 #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 (0x2UL << 0) 1739 #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 (0x2UL << 0)
1527 #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR4 (0x3UL << 0) 1740 #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR (0x3UL << 0)
1528 #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR4 (0x4UL << 0) 1741 #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR (0x4UL << 0)
1529 #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 (0x5UL << 0) 1742 #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 (0x5UL << 0)
1530 #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX4 (0x6UL << 0) 1743 #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX (0x6UL << 0)
1531 #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR (0x7UL << 0) 1744 #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR (0x7UL << 0)
1532 #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET (0x8UL << 0) 1745 #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET (0x8UL << 0)
1746 #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE (0x9UL << 0)
1747 #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY (0xaUL << 0)
1533 u8 media_type; 1748 u8 media_type;
1749 #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN (0x0UL << 0)
1534 #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP (0x1UL << 0) 1750 #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP (0x1UL << 0)
1535 #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC (0x2UL << 0) 1751 #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC (0x2UL << 0)
1536 #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE (0x3UL << 0) 1752 #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE (0x3UL << 0)
1537 u8 transceiver_type; 1753 u8 xcvr_pkg_type;
1538 #define PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_INTERNAL (0x1UL << 0) 1754 #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL (0x1UL << 0)
1539 #define PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_EXTERNAL (0x2UL << 0) 1755 #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL (0x2UL << 0)
1540 u8 phy_addr; 1756 u8 eee_config_phy_addr;
1541 #define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL 1757 #define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL
1542 #define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0 1758 #define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0
1543 u8 unused_2; 1759 #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED 0x20UL
1760 #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE 0x40UL
1761 #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI 0x80UL
1762 #define PORT_PHY_QCFG_RESP_EEE_CONFIG_MASK 0xe0UL
1763 #define PORT_PHY_QCFG_RESP_EEE_CONFIG_SFT 5
1764 u8 parallel_detect;
1765 #define PORT_PHY_QCFG_RESP_PARALLEL_DETECT 0x1UL
1766 #define PORT_PHY_QCFG_RESP_RESERVED_MASK 0xfeUL
1767 #define PORT_PHY_QCFG_RESP_RESERVED_SFT 1
1544 __le16 link_partner_adv_speeds; 1768 __le16 link_partner_adv_speeds;
1545 #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL 1769 #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL
1546 #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB 0x2UL 1770 #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB 0x2UL
@@ -1553,15 +1777,48 @@ struct hwrm_port_phy_qcfg_output {
1553 #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB 0x100UL 1777 #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB 0x100UL
1554 #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB 0x200UL 1778 #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB 0x200UL
1555 #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB 0x400UL 1779 #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB 0x400UL
1780 #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100GB 0x800UL
1781 #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD 0x1000UL
1782 #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB 0x2000UL
1556 u8 link_partner_adv_auto_mode; 1783 u8 link_partner_adv_auto_mode;
1557 #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE (0x0UL << 0) 1784 #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE (0x0UL << 0)
1558 #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS (0x1UL << 0) 1785 #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS (0x1UL << 0)
1559 #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED (0x2UL << 0) 1786 #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED (0x2UL << 0)
1560 #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0) 1787 #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0)
1561 #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_MASK (0x4UL << 0) 1788 #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK (0x4UL << 0)
1562 u8 link_partner_adv_pause; 1789 u8 link_partner_adv_pause;
1563 #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL 1790 #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL
1564 #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL 1791 #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL
1792 __le16 adv_eee_link_speed_mask;
1793 #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
1794 #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
1795 #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
1796 #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
1797 #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
1798 #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
1799 #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
1800 __le16 link_partner_adv_eee_link_speed_mask;
1801 #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
1802 #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
1803 #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
1804 #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
1805 #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
1806 #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
1807 #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
1808 __le32 xcvr_identifier_type_tx_lpi_timer;
1809 #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK 0xffffffUL
1810 #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_SFT 0
1811 #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK 0xff000000UL
1812 #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFT 24
1813 #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_UNKNOWN (0x0UL << 24)
1814 #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFP (0x3UL << 24)
1815 #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP (0xcUL << 24)
1816 #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS (0xdUL << 24)
1817 #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24)
1818 __le32 unused_1;
1819 char phy_vendor_name[16];
1820 char phy_vendor_partnumber[16];
1821 __le32 unused_2;
1565 u8 unused_3; 1822 u8 unused_3;
1566 u8 unused_4; 1823 u8 unused_4;
1567 u8 unused_5; 1824 u8 unused_5;
@@ -1569,7 +1826,7 @@ struct hwrm_port_phy_qcfg_output {
1569}; 1826};
1570 1827
1571/* hwrm_port_mac_cfg */ 1828/* hwrm_port_mac_cfg */
1572/* Input (32 bytes) */ 1829/* Input (40 bytes) */
1573struct hwrm_port_mac_cfg_input { 1830struct hwrm_port_mac_cfg_input {
1574 __le16 req_type; 1831 __le16 req_type;
1575 __le16 cmpl_ring; 1832 __le16 cmpl_ring;
@@ -1581,6 +1838,10 @@ struct hwrm_port_mac_cfg_input {
1581 #define PORT_MAC_CFG_REQ_FLAGS_COS_ASSIGNMENT_ENABLE 0x2UL 1838 #define PORT_MAC_CFG_REQ_FLAGS_COS_ASSIGNMENT_ENABLE 0x2UL
1582 #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL 1839 #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL
1583 #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL 1840 #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL
1841 #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL
1842 #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL
1843 #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL
1844 #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL
1584 __le32 enables; 1845 __le32 enables;
1585 #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL 1846 #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
1586 #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL 1847 #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
@@ -1588,6 +1849,8 @@ struct hwrm_port_mac_cfg_input {
1588 #define PORT_MAC_CFG_REQ_ENABLES_LCOS_MAP_PRI 0x8UL 1849 #define PORT_MAC_CFG_REQ_ENABLES_LCOS_MAP_PRI 0x8UL
1589 #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL 1850 #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL
1590 #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL 1851 #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL
1852 #define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL
1853 #define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
1591 __le16 port_id; 1854 __le16 port_id;
1592 u8 ipg; 1855 u8 ipg;
1593 u8 lpbk; 1856 u8 lpbk;
@@ -1598,6 +1861,9 @@ struct hwrm_port_mac_cfg_input {
1598 u8 lcos_map_pri; 1861 u8 lcos_map_pri;
1599 u8 tunnel_pri2cos_map_pri; 1862 u8 tunnel_pri2cos_map_pri;
1600 u8 dscp2pri_map_pri; 1863 u8 dscp2pri_map_pri;
1864 __le16 rx_ts_capture_ptp_msg_type;
1865 __le16 tx_ts_capture_ptp_msg_type;
1866 __le32 unused_0;
1601}; 1867};
1602 1868
1603/* Output (16 bytes) */ 1869/* Output (16 bytes) */
@@ -1754,7 +2020,79 @@ struct hwrm_port_blink_led_output {
1754 u8 valid; 2020 u8 valid;
1755}; 2021};
1756 2022
1757/* hwrm_queue_qportcfg */ 2023/* hwrm_port_phy_qcaps */
2024/* Input (24 bytes) */
2025struct hwrm_port_phy_qcaps_input {
2026 __le16 req_type;
2027 __le16 cmpl_ring;
2028 __le16 seq_id;
2029 __le16 target_id;
2030 __le64 resp_addr;
2031 __le16 port_id;
2032 __le16 unused_0[3];
2033};
2034
2035/* Output (24 bytes) */
2036struct hwrm_port_phy_qcaps_output {
2037 __le16 error_code;
2038 __le16 req_type;
2039 __le16 seq_id;
2040 __le16 resp_len;
2041 u8 eee_supported;
2042 #define PORT_PHY_QCAPS_RESP_EEE_SUPPORTED 0x1UL
2043 #define PORT_PHY_QCAPS_RESP_RSVD1_MASK 0xfeUL
2044 #define PORT_PHY_QCAPS_RESP_RSVD1_SFT 1
2045 u8 unused_0;
2046 __le16 supported_speeds_force_mode;
2047 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL
2048 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL
2049 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD 0x4UL
2050 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GB 0x8UL
2051 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2GB 0x10UL
2052 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB 0x20UL
2053 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10GB 0x40UL
2054 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_20GB 0x80UL
2055 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_25GB 0x100UL
2056 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_40GB 0x200UL
2057 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_50GB 0x400UL
2058 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL
2059 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL
2060 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL
2061 __le16 supported_speeds_auto_mode;
2062 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL
2063 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL
2064 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD 0x4UL
2065 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GB 0x8UL
2066 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2GB 0x10UL
2067 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB 0x20UL
2068 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10GB 0x40UL
2069 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_20GB 0x80UL
2070 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_25GB 0x100UL
2071 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_40GB 0x200UL
2072 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_50GB 0x400UL
2073 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL
2074 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL
2075 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL
2076 __le16 supported_speeds_eee_mode;
2077 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL
2078 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL
2079 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 0x4UL
2080 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_1GB 0x8UL
2081 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 0x10UL
2082 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 0x20UL
2083 #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_10GB 0x40UL
2084 __le32 tx_lpi_timer_low;
2085 #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK 0xffffffUL
2086 #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_SFT 0
2087 #define PORT_PHY_QCAPS_RESP_RSVD2_MASK 0xff000000UL
2088 #define PORT_PHY_QCAPS_RESP_RSVD2_SFT 24
2089 __le32 valid_tx_lpi_timer_high;
2090 #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL
2091 #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0
2092 #define PORT_PHY_QCAPS_RESP_VALID_MASK 0xff000000UL
2093 #define PORT_PHY_QCAPS_RESP_VALID_SFT 24
2094};
2095
1758/* Input (24 bytes) */ 2096/* Input (24 bytes) */
1759struct hwrm_queue_qportcfg_input { 2097struct hwrm_queue_qportcfg_input {
1760 __le16 req_type; 2098 __le16 req_type;
@@ -1766,6 +2104,7 @@ struct hwrm_queue_qportcfg_input {
1766 #define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL 2104 #define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL
1767 #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX (0x0UL << 0) 2105 #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
1768 #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX (0x1UL << 0) 2106 #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
2107 #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX
1769 __le16 port_id; 2108 __le16 port_id;
1770 __le16 unused_0; 2109 __le16 unused_0;
1771}; 2110};
@@ -1838,6 +2177,7 @@ struct hwrm_queue_cfg_input {
1838 #define QUEUE_CFG_REQ_FLAGS_PATH 0x1UL 2177 #define QUEUE_CFG_REQ_FLAGS_PATH 0x1UL
1839 #define QUEUE_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0) 2178 #define QUEUE_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
1840 #define QUEUE_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0) 2179 #define QUEUE_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
2180 #define QUEUE_CFG_REQ_FLAGS_PATH_LAST QUEUE_CFG_REQ_FLAGS_PATH_RX
1841 __le32 enables; 2181 __le32 enables;
1842 #define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL 2182 #define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL
1843 #define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL 2183 #define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL
@@ -1875,6 +2215,7 @@ struct hwrm_queue_buffers_cfg_input {
1875 #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH 0x1UL 2215 #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH 0x1UL
1876 #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0) 2216 #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
1877 #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0) 2217 #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
2218 #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_LAST QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_RX
1878 __le32 enables; 2219 __le32 enables;
1879 #define QUEUE_BUFFERS_CFG_REQ_ENABLES_RESERVED 0x1UL 2220 #define QUEUE_BUFFERS_CFG_REQ_ENABLES_RESERVED 0x1UL
1880 #define QUEUE_BUFFERS_CFG_REQ_ENABLES_SHARED 0x2UL 2221 #define QUEUE_BUFFERS_CFG_REQ_ENABLES_SHARED 0x2UL
@@ -1952,6 +2293,7 @@ struct hwrm_queue_pri2cos_cfg_input {
1952 #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH 0x1UL 2293 #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH 0x1UL
1953 #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0) 2294 #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
1954 #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0) 2295 #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
2296 #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX
1955 #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x2UL 2297 #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x2UL
1956 __le32 enables; 2298 __le32 enables;
1957 u8 port_id; 2299 u8 port_id;
@@ -2158,6 +2500,8 @@ struct hwrm_vnic_cfg_input {
2158 #define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL 2500 #define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL
2159 #define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL 2501 #define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL
2160 #define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE 0x4UL 2502 #define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE 0x4UL
2503 #define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL
2504 #define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL
2161 __le32 enables; 2505 __le32 enables;
2162 #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL 2506 #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
2163 #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL 2507 #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
@@ -2622,6 +2966,7 @@ struct hwrm_cfa_l2_filter_alloc_input {
2622 #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL 2966 #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL
2623 #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX (0x0UL << 0) 2967 #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX (0x0UL << 0)
2624 #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX (0x1UL << 0) 2968 #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX (0x1UL << 0)
2969 #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX
2625 #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL 2970 #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL
2626 #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL 2971 #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL
2627 #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL 2972 #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL
@@ -2747,6 +3092,7 @@ struct hwrm_cfa_l2_filter_cfg_input {
2747 #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL 3092 #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
2748 #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0) 3093 #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
2749 #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0) 3094 #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
3095 #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
2750 #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL 3096 #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
2751 __le32 enables; 3097 __le32 enables;
2752 #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL 3098 #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL
@@ -3337,6 +3683,41 @@ struct hwrm_fw_reset_output {
3337 u8 valid; 3683 u8 valid;
3338}; 3684};
3339 3685
3686/* hwrm_fw_qstatus */
3687/* Input (24 bytes) */
3688struct hwrm_fw_qstatus_input {
3689 __le16 req_type;
3690 __le16 cmpl_ring;
3691 __le16 seq_id;
3692 __le16 target_id;
3693 __le64 resp_addr;
3694 u8 embedded_proc_type;
3695 #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT (0x0UL << 0)
3696 #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT (0x1UL << 0)
3697 #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL (0x2UL << 0)
3698 #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE (0x3UL << 0)
3699 #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_RSVD (0x4UL << 0)
3700 u8 unused_0[7];
3701};
3702
3703/* Output (16 bytes) */
3704struct hwrm_fw_qstatus_output {
3705 __le16 error_code;
3706 __le16 req_type;
3707 __le16 seq_id;
3708 __le16 resp_len;
3709 u8 selfrst_status;
3710 #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0)
3711 #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0)
3712 #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0)
3713 u8 unused_0;
3714 __le16 unused_1;
3715 u8 unused_2;
3716 u8 unused_3;
3717 u8 unused_4;
3718 u8 valid;
3719};
3720
3340/* hwrm_exec_fwd_resp */ 3721/* hwrm_exec_fwd_resp */
3341/* Input (128 bytes) */ 3722/* Input (128 bytes) */
3342struct hwrm_exec_fwd_resp_input { 3723struct hwrm_exec_fwd_resp_input {
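The bnxt_hsi.h hunks above track the HWRM 1.2.2 specification update: new asynchronous event records (PF/VF communication status, VF configuration change), extra link speeds up to 100Gb/10Mb, EEE and PTP related fields, and new commands such as HWRM_PORT_PHY_QCAPS and HWRM_FW_QSTATUS. As a rough sketch only (not part of the patch, helper name hypothetical), a caller holding a completed hwrm_port_phy_qcaps response could test the new EEE capability bit like this:

	/* Hypothetical helper, not in this patch: report whether the PHY
	 * advertises EEE support in the HWRM_PORT_PHY_QCAPS response.
	 */
	static bool bnxt_resp_eee_supported(struct hwrm_port_phy_qcaps_output *resp)
	{
		return !!(resp->eee_supported & PORT_PHY_QCAPS_RESP_EEE_SUPPORTED);
	}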
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
index 43ef392c8588..40a7b0e09612 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
@@ -1,6 +1,6 @@
1/* Broadcom NetXtreme-C/E network driver. 1/* Broadcom NetXtreme-C/E network driver.
2 * 2 *
3 * Copyright (c) 2014-2015 Broadcom Corporation 3 * Copyright (c) 2014-2016 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 0c5f510492f1..8457850b0bdd 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -1,6 +1,6 @@
1/* Broadcom NetXtreme-C/E network driver. 1/* Broadcom NetXtreme-C/E network driver.
2 * 2 *
3 * Copyright (c) 2014-2015 Broadcom Corporation 3 * Copyright (c) 2014-2016 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -771,12 +771,8 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
771 PORT_PHY_QCFG_RESP_LINK_NO_LINK) { 771 PORT_PHY_QCFG_RESP_LINK_NO_LINK) {
772 phy_qcfg_resp.link = 772 phy_qcfg_resp.link =
773 PORT_PHY_QCFG_RESP_LINK_LINK; 773 PORT_PHY_QCFG_RESP_LINK_LINK;
774 if (phy_qcfg_resp.auto_link_speed) 774 phy_qcfg_resp.link_speed = cpu_to_le16(
775 phy_qcfg_resp.link_speed = 775 PORT_PHY_QCFG_RESP_LINK_SPEED_10GB);
776 phy_qcfg_resp.auto_link_speed;
777 else
778 phy_qcfg_resp.link_speed =
779 phy_qcfg_resp.force_link_speed;
780 phy_qcfg_resp.duplex = 776 phy_qcfg_resp.duplex =
781 PORT_PHY_QCFG_RESP_DUPLEX_FULL; 777 PORT_PHY_QCFG_RESP_DUPLEX_FULL;
782 phy_qcfg_resp.pause = 778 phy_qcfg_resp.pause =
@@ -859,8 +855,8 @@ void bnxt_update_vf_mac(struct bnxt *bp)
859 * default but the stored zero MAC will allow the VF user to change 855 * default but the stored zero MAC will allow the VF user to change
860 * the random MAC address using ndo_set_mac_address() if he wants. 856 * the random MAC address using ndo_set_mac_address() if he wants.
861 */ 857 */
862 if (!ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr)) 858 if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr))
863 memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN); 859 memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);
864 860
865 /* overwrite netdev dev_addr with admin VF MAC */ 861 /* overwrite netdev dev_addr with admin VF MAC */
866 if (is_valid_ether_addr(bp->vf.mac_addr)) 862 if (is_valid_ether_addr(bp->vf.mac_addr))
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
index c151280e3980..3f08354a247e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -1,6 +1,6 @@
1/* Broadcom NetXtreme-C/E network driver. 1/* Broadcom NetXtreme-C/E network driver.
2 * 2 *
3 * Copyright (c) 2014-2015 Broadcom Corporation 3 * Copyright (c) 2014-2016 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 6746fd03cb3a..f7b42b9fc979 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -104,8 +104,8 @@ static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
104static inline void dmadesc_set(struct bcmgenet_priv *priv, 104static inline void dmadesc_set(struct bcmgenet_priv *priv,
105 void __iomem *d, dma_addr_t addr, u32 val) 105 void __iomem *d, dma_addr_t addr, u32 val)
106{ 106{
107 dmadesc_set_length_status(priv, d, val);
108 dmadesc_set_addr(priv, d, addr); 107 dmadesc_set_addr(priv, d, addr);
108 dmadesc_set_length_status(priv, d, val);
109} 109}
110 110
111static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv, 111static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
@@ -1171,6 +1171,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
1171 struct enet_cb *tx_cb_ptr; 1171 struct enet_cb *tx_cb_ptr;
1172 struct netdev_queue *txq; 1172 struct netdev_queue *txq;
1173 unsigned int pkts_compl = 0; 1173 unsigned int pkts_compl = 0;
1174 unsigned int bytes_compl = 0;
1174 unsigned int c_index; 1175 unsigned int c_index;
1175 unsigned int txbds_ready; 1176 unsigned int txbds_ready;
1176 unsigned int txbds_processed = 0; 1177 unsigned int txbds_processed = 0;
@@ -1193,16 +1194,13 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
1193 tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr]; 1194 tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr];
1194 if (tx_cb_ptr->skb) { 1195 if (tx_cb_ptr->skb) {
1195 pkts_compl++; 1196 pkts_compl++;
1196 dev->stats.tx_packets++; 1197 bytes_compl += GENET_CB(tx_cb_ptr->skb)->bytes_sent;
1197 dev->stats.tx_bytes += tx_cb_ptr->skb->len;
1198 dma_unmap_single(&dev->dev, 1198 dma_unmap_single(&dev->dev,
1199 dma_unmap_addr(tx_cb_ptr, dma_addr), 1199 dma_unmap_addr(tx_cb_ptr, dma_addr),
1200 dma_unmap_len(tx_cb_ptr, dma_len), 1200 dma_unmap_len(tx_cb_ptr, dma_len),
1201 DMA_TO_DEVICE); 1201 DMA_TO_DEVICE);
1202 bcmgenet_free_cb(tx_cb_ptr); 1202 bcmgenet_free_cb(tx_cb_ptr);
1203 } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) { 1203 } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
1204 dev->stats.tx_bytes +=
1205 dma_unmap_len(tx_cb_ptr, dma_len);
1206 dma_unmap_page(&dev->dev, 1204 dma_unmap_page(&dev->dev,
1207 dma_unmap_addr(tx_cb_ptr, dma_addr), 1205 dma_unmap_addr(tx_cb_ptr, dma_addr),
1208 dma_unmap_len(tx_cb_ptr, dma_len), 1206 dma_unmap_len(tx_cb_ptr, dma_len),
@@ -1220,6 +1218,9 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
1220 ring->free_bds += txbds_processed; 1218 ring->free_bds += txbds_processed;
1221 ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK; 1219 ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;
1222 1220
1221 dev->stats.tx_packets += pkts_compl;
1222 dev->stats.tx_bytes += bytes_compl;
1223
1223 if (ring->free_bds > (MAX_SKB_FRAGS + 1)) { 1224 if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
1224 txq = netdev_get_tx_queue(dev, ring->queue); 1225 txq = netdev_get_tx_queue(dev, ring->queue);
1225 if (netif_tx_queue_stopped(txq)) 1226 if (netif_tx_queue_stopped(txq))
@@ -1296,7 +1297,7 @@ static int bcmgenet_xmit_single(struct net_device *dev,
1296 1297
1297 tx_cb_ptr->skb = skb; 1298 tx_cb_ptr->skb = skb;
1298 1299
1299 skb_len = skb_headlen(skb) < ETH_ZLEN ? ETH_ZLEN : skb_headlen(skb); 1300 skb_len = skb_headlen(skb);
1300 1301
1301 mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE); 1302 mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
1302 ret = dma_mapping_error(kdev, mapping); 1303 ret = dma_mapping_error(kdev, mapping);
@@ -1330,6 +1331,7 @@ static int bcmgenet_xmit_frag(struct net_device *dev,
1330 struct bcmgenet_priv *priv = netdev_priv(dev); 1331 struct bcmgenet_priv *priv = netdev_priv(dev);
1331 struct device *kdev = &priv->pdev->dev; 1332 struct device *kdev = &priv->pdev->dev;
1332 struct enet_cb *tx_cb_ptr; 1333 struct enet_cb *tx_cb_ptr;
1334 unsigned int frag_size;
1333 dma_addr_t mapping; 1335 dma_addr_t mapping;
1334 int ret; 1336 int ret;
1335 1337
@@ -1337,10 +1339,12 @@ static int bcmgenet_xmit_frag(struct net_device *dev,
1337 1339
1338 if (unlikely(!tx_cb_ptr)) 1340 if (unlikely(!tx_cb_ptr))
1339 BUG(); 1341 BUG();
1342
1340 tx_cb_ptr->skb = NULL; 1343 tx_cb_ptr->skb = NULL;
1341 1344
1342 mapping = skb_frag_dma_map(kdev, frag, 0, 1345 frag_size = skb_frag_size(frag);
1343 skb_frag_size(frag), DMA_TO_DEVICE); 1346
1347 mapping = skb_frag_dma_map(kdev, frag, 0, frag_size, DMA_TO_DEVICE);
1344 ret = dma_mapping_error(kdev, mapping); 1348 ret = dma_mapping_error(kdev, mapping);
1345 if (ret) { 1349 if (ret) {
1346 priv->mib.tx_dma_failed++; 1350 priv->mib.tx_dma_failed++;
@@ -1350,10 +1354,10 @@ static int bcmgenet_xmit_frag(struct net_device *dev,
1350 } 1354 }
1351 1355
1352 dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping); 1356 dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
1353 dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size); 1357 dma_unmap_len_set(tx_cb_ptr, dma_len, frag_size);
1354 1358
1355 dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, 1359 dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
1356 (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags | 1360 (frag_size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
1357 (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT)); 1361 (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));
1358 1362
1359 return 0; 1363 return 0;
@@ -1446,15 +1450,19 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
1446 else 1450 else
1447 index -= 1; 1451 index -= 1;
1448 1452
1449 nr_frags = skb_shinfo(skb)->nr_frags;
1450 ring = &priv->tx_rings[index]; 1453 ring = &priv->tx_rings[index];
1451 txq = netdev_get_tx_queue(dev, ring->queue); 1454 txq = netdev_get_tx_queue(dev, ring->queue);
1452 1455
1456 nr_frags = skb_shinfo(skb)->nr_frags;
1457
1453 spin_lock_irqsave(&ring->lock, flags); 1458 spin_lock_irqsave(&ring->lock, flags);
1454 if (ring->free_bds <= nr_frags + 1) { 1459 if (ring->free_bds <= (nr_frags + 1)) {
1455 netif_tx_stop_queue(txq); 1460 if (!netif_tx_queue_stopped(txq)) {
1456 netdev_err(dev, "%s: tx ring %d full when queue %d awake\n", 1461 netif_tx_stop_queue(txq);
1457 __func__, index, ring->queue); 1462 netdev_err(dev,
1463 "%s: tx ring %d full when queue %d awake\n",
1464 __func__, index, ring->queue);
1465 }
1458 ret = NETDEV_TX_BUSY; 1466 ret = NETDEV_TX_BUSY;
1459 goto out; 1467 goto out;
1460 } 1468 }
@@ -1464,6 +1472,11 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
1464 goto out; 1472 goto out;
1465 } 1473 }
1466 1474
1475 /* Retain how many bytes will be sent on the wire, without TSB inserted
1476 * by transmit checksum offload
1477 */
1478 GENET_CB(skb)->bytes_sent = skb->len;
1479
1467 /* set the SKB transmit checksum */ 1480 /* set the SKB transmit checksum */
1468 if (priv->desc_64b_en) { 1481 if (priv->desc_64b_en) {
1469 skb = bcmgenet_put_tx_csum(dev, skb); 1482 skb = bcmgenet_put_tx_csum(dev, skb);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 967367557309..1e2dc34d331a 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -531,6 +531,12 @@ struct bcmgenet_hw_params {
531 u32 flags; 531 u32 flags;
532}; 532};
533 533
534struct bcmgenet_skb_cb {
535 unsigned int bytes_sent; /* bytes on the wire (no TSB) */
536};
537
538#define GENET_CB(skb) ((struct bcmgenet_skb_cb *)((skb)->cb))
539
534struct bcmgenet_tx_ring { 540struct bcmgenet_tx_ring {
535 spinlock_t lock; /* ring lock */ 541 spinlock_t lock; /* ring lock */
536 struct napi_struct napi; /* NAPI per tx queue */ 542 struct napi_struct napi; /* NAPI per tx queue */
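The bcmgenet changes above move TX byte accounting to the on-wire length recorded at transmit time, so the transmit status block (TSB) inserted by checksum offload is no longer counted. The driver does this inline in bcmgenet_xmit() and __bcmgenet_tx_reclaim(); the snippet below is only a sketch of the pattern using the GENET_CB() accessor added in this hunk (helper name hypothetical):

	/* Hypothetical helper, not in the patch: stash the on-wire length in
	 * the skb control block before any TSB is prepended.
	 */
	static inline void bcmgenet_record_bytes_sent(struct sk_buff *skb)
	{
		GENET_CB(skb)->bytes_sent = skb->len;
	}

On the completion side, __bcmgenet_tx_reclaim() now accumulates pkts_compl and bytes_compl across the reclaim loop and folds them into dev->stats once per pass, instead of updating the counters per descriptor.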
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 3ce6095ced3d..eec3200ade4a 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -61,8 +61,7 @@
61#define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) 61#define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0)
62#define MACB_WOL_ENABLED (0x1 << 1) 62#define MACB_WOL_ENABLED (0x1 << 1)
63 63
64/* 64/* Graceful stop timeouts in us. We should allow up to
65 * Graceful stop timeouts in us. We should allow up to
66 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions) 65 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
67 */ 66 */
68#define MACB_HALT_TIMEOUT 1230 67#define MACB_HALT_TIMEOUT 1230
@@ -130,9 +129,8 @@ static void hw_writel(struct macb *bp, int offset, u32 value)
130 writel_relaxed(value, bp->regs + offset); 129 writel_relaxed(value, bp->regs + offset);
131} 130}
132 131
133/* 132/* Find the CPU endianness by using the loopback bit of NCR register. When the
134 * Find the CPU endianness by using the loopback bit of NCR register. When the 133 * CPU is in big endian we need to program swapped mode for management
135 * CPU is in big endian we need to program swaped mode for management
136 * descriptor access. 134 * descriptor access.
137 */ 135 */
138static bool hw_is_native_io(void __iomem *addr) 136static bool hw_is_native_io(void __iomem *addr)
@@ -189,7 +187,7 @@ static void macb_get_hwaddr(struct macb *bp)
189 187
190 pdata = dev_get_platdata(&bp->pdev->dev); 188 pdata = dev_get_platdata(&bp->pdev->dev);
191 189
192 /* Check all 4 address register for vaild address */ 190 /* Check all 4 address register for valid address */
193 for (i = 0; i < 4; i++) { 191 for (i = 0; i < 4; i++) {
194 bottom = macb_or_gem_readl(bp, SA1B + i * 8); 192 bottom = macb_or_gem_readl(bp, SA1B + i * 8);
195 top = macb_or_gem_readl(bp, SA1T + i * 8); 193 top = macb_or_gem_readl(bp, SA1T + i * 8);
@@ -297,7 +295,7 @@ static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
297 ferr = DIV_ROUND_UP(ferr, rate / 100000); 295 ferr = DIV_ROUND_UP(ferr, rate / 100000);
298 if (ferr > 5) 296 if (ferr > 5)
299 netdev_warn(dev, "unable to generate target frequency: %ld Hz\n", 297 netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
300 rate); 298 rate);
301 299
302 if (clk_set_rate(clk, rate_rounded)) 300 if (clk_set_rate(clk, rate_rounded))
303 netdev_err(dev, "adjusting tx_clk failed.\n"); 301 netdev_err(dev, "adjusting tx_clk failed.\n");
@@ -386,7 +384,8 @@ static int macb_mii_probe(struct net_device *dev)
386 384
387 pdata = dev_get_platdata(&bp->pdev->dev); 385 pdata = dev_get_platdata(&bp->pdev->dev);
388 if (pdata && gpio_is_valid(pdata->phy_irq_pin)) { 386 if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
389 ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, "phy int"); 387 ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin,
388 "phy int");
390 if (!ret) { 389 if (!ret) {
391 phy_irq = gpio_to_irq(pdata->phy_irq_pin); 390 phy_irq = gpio_to_irq(pdata->phy_irq_pin);
392 phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq; 391 phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
@@ -430,7 +429,7 @@ static int macb_mii_init(struct macb *bp)
430 macb_writel(bp, NCR, MACB_BIT(MPE)); 429 macb_writel(bp, NCR, MACB_BIT(MPE));
431 430
432 bp->mii_bus = mdiobus_alloc(); 431 bp->mii_bus = mdiobus_alloc();
433 if (bp->mii_bus == NULL) { 432 if (!bp->mii_bus) {
434 err = -ENOMEM; 433 err = -ENOMEM;
435 goto err_out; 434 goto err_out;
436 } 435 }
@@ -439,7 +438,7 @@ static int macb_mii_init(struct macb *bp)
439 bp->mii_bus->read = &macb_mdio_read; 438 bp->mii_bus->read = &macb_mdio_read;
440 bp->mii_bus->write = &macb_mdio_write; 439 bp->mii_bus->write = &macb_mdio_write;
441 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", 440 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
442 bp->pdev->name, bp->pdev->id); 441 bp->pdev->name, bp->pdev->id);
443 bp->mii_bus->priv = bp; 442 bp->mii_bus->priv = bp;
444 bp->mii_bus->parent = &bp->dev->dev; 443 bp->mii_bus->parent = &bp->dev->dev;
445 pdata = dev_get_platdata(&bp->pdev->dev); 444 pdata = dev_get_platdata(&bp->pdev->dev);
@@ -452,7 +451,8 @@ static int macb_mii_init(struct macb *bp)
452 err = of_mdiobus_register(bp->mii_bus, np); 451 err = of_mdiobus_register(bp->mii_bus, np);
453 452
454 /* fallback to standard phy registration if no phy were 453 /* fallback to standard phy registration if no phy were
455 found during dt phy registration */ 454 * found during dt phy registration
455 */
456 if (!err && !phy_find_first(bp->mii_bus)) { 456 if (!err && !phy_find_first(bp->mii_bus)) {
457 for (i = 0; i < PHY_MAX_ADDR; i++) { 457 for (i = 0; i < PHY_MAX_ADDR; i++) {
458 struct phy_device *phydev; 458 struct phy_device *phydev;
@@ -499,7 +499,7 @@ static void macb_update_stats(struct macb *bp)
499 499
500 WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); 500 WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
501 501
502 for(; p < end; p++, offset += 4) 502 for (; p < end; p++, offset += 4)
503 *p += bp->macb_reg_readl(bp, offset); 503 *p += bp->macb_reg_readl(bp, offset);
504} 504}
505 505
@@ -567,8 +567,7 @@ static void macb_tx_error_task(struct work_struct *work)
567 /* Make sure nobody is trying to queue up new packets */ 567 /* Make sure nobody is trying to queue up new packets */
568 netif_tx_stop_all_queues(bp->dev); 568 netif_tx_stop_all_queues(bp->dev);
569 569
570 /* 570 /* Stop transmission now
571 * Stop transmission now
572 * (in case we have just queued new packets) 571 * (in case we have just queued new packets)
573 * macb/gem must be halted to write TBQP register 572 * macb/gem must be halted to write TBQP register
574 */ 573 */
@@ -576,8 +575,7 @@ static void macb_tx_error_task(struct work_struct *work)
576 /* Just complain for now, reinitializing TX path can be good */ 575 /* Just complain for now, reinitializing TX path can be good */
577 netdev_err(bp->dev, "BUG: halt tx timed out\n"); 576 netdev_err(bp->dev, "BUG: halt tx timed out\n");
578 577
579 /* 578 /* Treat frames in TX queue including the ones that caused the error.
580 * Treat frames in TX queue including the ones that caused the error.
581 * Free transmit buffers in upper layer. 579 * Free transmit buffers in upper layer.
582 */ 580 */
583 for (tail = queue->tx_tail; tail != queue->tx_head; tail++) { 581 for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
@@ -607,10 +605,9 @@ static void macb_tx_error_task(struct work_struct *work)
607 bp->stats.tx_bytes += skb->len; 605 bp->stats.tx_bytes += skb->len;
608 } 606 }
609 } else { 607 } else {
610 /* 608 /* "Buffers exhausted mid-frame" errors may only happen
611 * "Buffers exhausted mid-frame" errors may only happen 609 * if the driver is buggy, so complain loudly about
612 * if the driver is buggy, so complain loudly about those. 610 * those. Statistics are updated by hardware.
613 * Statistics are updated by hardware.
614 */ 611 */
615 if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED)) 612 if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
616 netdev_err(bp->dev, 613 netdev_err(bp->dev,
@@ -662,7 +659,7 @@ static void macb_tx_interrupt(struct macb_queue *queue)
662 queue_writel(queue, ISR, MACB_BIT(TCOMP)); 659 queue_writel(queue, ISR, MACB_BIT(TCOMP));
663 660
664 netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n", 661 netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
665 (unsigned long)status); 662 (unsigned long)status);
666 663
667 head = queue->tx_head; 664 head = queue->tx_head;
668 for (tail = queue->tx_tail; tail != head; tail++) { 665 for (tail = queue->tx_tail; tail != head; tail++) {
@@ -722,7 +719,8 @@ static void gem_rx_refill(struct macb *bp)
722 struct sk_buff *skb; 719 struct sk_buff *skb;
723 dma_addr_t paddr; 720 dma_addr_t paddr;
724 721
725 while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) { 722 while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
723 RX_RING_SIZE) > 0) {
726 entry = macb_rx_ring_wrap(bp->rx_prepared_head); 724 entry = macb_rx_ring_wrap(bp->rx_prepared_head);
727 725
728 /* Make hw descriptor updates visible to CPU */ 726 /* Make hw descriptor updates visible to CPU */
@@ -730,10 +728,10 @@ static void gem_rx_refill(struct macb *bp)
730 728
731 bp->rx_prepared_head++; 729 bp->rx_prepared_head++;
732 730
733 if (bp->rx_skbuff[entry] == NULL) { 731 if (!bp->rx_skbuff[entry]) {
734 /* allocate sk_buff for this free entry in ring */ 732 /* allocate sk_buff for this free entry in ring */
735 skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size); 733 skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
736 if (unlikely(skb == NULL)) { 734 if (unlikely(!skb)) {
737 netdev_err(bp->dev, 735 netdev_err(bp->dev,
738 "Unable to allocate sk_buff\n"); 736 "Unable to allocate sk_buff\n");
739 break; 737 break;
@@ -741,7 +739,8 @@ static void gem_rx_refill(struct macb *bp)
741 739
742 /* now fill corresponding descriptor entry */ 740 /* now fill corresponding descriptor entry */
743 paddr = dma_map_single(&bp->pdev->dev, skb->data, 741 paddr = dma_map_single(&bp->pdev->dev, skb->data,
744 bp->rx_buffer_size, DMA_FROM_DEVICE); 742 bp->rx_buffer_size,
743 DMA_FROM_DEVICE);
745 if (dma_mapping_error(&bp->pdev->dev, paddr)) { 744 if (dma_mapping_error(&bp->pdev->dev, paddr)) {
746 dev_kfree_skb(skb); 745 dev_kfree_skb(skb);
747 break; 746 break;
@@ -766,7 +765,7 @@ static void gem_rx_refill(struct macb *bp)
766 wmb(); 765 wmb();
767 766
768 netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n", 767 netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
769 bp->rx_prepared_head, bp->rx_tail); 768 bp->rx_prepared_head, bp->rx_tail);
770} 769}
771 770
772/* Mark DMA descriptors from begin up to and not including end as unused */ 771/* Mark DMA descriptors from begin up to and not including end as unused */
@@ -777,14 +776,14 @@ static void discard_partial_frame(struct macb *bp, unsigned int begin,
777 776
778 for (frag = begin; frag != end; frag++) { 777 for (frag = begin; frag != end; frag++) {
779 struct macb_dma_desc *desc = macb_rx_desc(bp, frag); 778 struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
779
780 desc->addr &= ~MACB_BIT(RX_USED); 780 desc->addr &= ~MACB_BIT(RX_USED);
781 } 781 }
782 782
783 /* Make descriptor updates visible to hardware */ 783 /* Make descriptor updates visible to hardware */
784 wmb(); 784 wmb();
785 785
786 /* 786 /* When this happens, the hardware stats registers for
787 * When this happens, the hardware stats registers for
788 * whatever caused this is updated, so we don't have to record 787 * whatever caused this is updated, so we don't have to record
789 * anything. 788 * anything.
790 */ 789 */
@@ -880,11 +879,10 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
880 len = desc->ctrl & bp->rx_frm_len_mask; 879 len = desc->ctrl & bp->rx_frm_len_mask;
881 880
882 netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n", 881 netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
883 macb_rx_ring_wrap(first_frag), 882 macb_rx_ring_wrap(first_frag),
884 macb_rx_ring_wrap(last_frag), len); 883 macb_rx_ring_wrap(last_frag), len);
885 884
886 /* 885 /* The ethernet header starts NET_IP_ALIGN bytes into the
887 * The ethernet header starts NET_IP_ALIGN bytes into the
888 * first buffer. Since the header is 14 bytes, this makes the 886 * first buffer. Since the header is 14 bytes, this makes the
889 * payload word-aligned. 887 * payload word-aligned.
890 * 888 *
@@ -917,11 +915,15 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
917 unsigned int frag_len = bp->rx_buffer_size; 915 unsigned int frag_len = bp->rx_buffer_size;
918 916
919 if (offset + frag_len > len) { 917 if (offset + frag_len > len) {
920 BUG_ON(frag != last_frag); 918 if (unlikely(frag != last_frag)) {
919 dev_kfree_skb_any(skb);
920 return -1;
921 }
921 frag_len = len - offset; 922 frag_len = len - offset;
922 } 923 }
923 skb_copy_to_linear_data_offset(skb, offset, 924 skb_copy_to_linear_data_offset(skb, offset,
924 macb_rx_buffer(bp, frag), frag_len); 925 macb_rx_buffer(bp, frag),
926 frag_len);
925 offset += bp->rx_buffer_size; 927 offset += bp->rx_buffer_size;
926 desc = macb_rx_desc(bp, frag); 928 desc = macb_rx_desc(bp, frag);
927 desc->addr &= ~MACB_BIT(RX_USED); 929 desc->addr &= ~MACB_BIT(RX_USED);
@@ -939,14 +941,29 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
939 bp->stats.rx_packets++; 941 bp->stats.rx_packets++;
940 bp->stats.rx_bytes += skb->len; 942 bp->stats.rx_bytes += skb->len;
941 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", 943 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
942 skb->len, skb->csum); 944 skb->len, skb->csum);
943 netif_receive_skb(skb); 945 netif_receive_skb(skb);
944 946
945 return 0; 947 return 0;
946} 948}
947 949
950static inline void macb_init_rx_ring(struct macb *bp)
951{
952 dma_addr_t addr;
953 int i;
954
955 addr = bp->rx_buffers_dma;
956 for (i = 0; i < RX_RING_SIZE; i++) {
957 bp->rx_ring[i].addr = addr;
958 bp->rx_ring[i].ctrl = 0;
959 addr += bp->rx_buffer_size;
960 }
961 bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
962}
963
948static int macb_rx(struct macb *bp, int budget) 964static int macb_rx(struct macb *bp, int budget)
949{ 965{
966 bool reset_rx_queue = false;
950 int received = 0; 967 int received = 0;
951 unsigned int tail; 968 unsigned int tail;
952 int first_frag = -1; 969 int first_frag = -1;
@@ -972,10 +989,18 @@ static int macb_rx(struct macb *bp, int budget)
972 989
973 if (ctrl & MACB_BIT(RX_EOF)) { 990 if (ctrl & MACB_BIT(RX_EOF)) {
974 int dropped; 991 int dropped;
975 BUG_ON(first_frag == -1); 992
993 if (unlikely(first_frag == -1)) {
994 reset_rx_queue = true;
995 continue;
996 }
976 997
977 dropped = macb_rx_frame(bp, first_frag, tail); 998 dropped = macb_rx_frame(bp, first_frag, tail);
978 first_frag = -1; 999 first_frag = -1;
1000 if (unlikely(dropped < 0)) {
1001 reset_rx_queue = true;
1002 continue;
1003 }
979 if (!dropped) { 1004 if (!dropped) {
980 received++; 1005 received++;
981 budget--; 1006 budget--;
@@ -983,6 +1008,26 @@ static int macb_rx(struct macb *bp, int budget)
983 } 1008 }
984 } 1009 }
985 1010
1011 if (unlikely(reset_rx_queue)) {
1012 unsigned long flags;
1013 u32 ctrl;
1014
1015 netdev_err(bp->dev, "RX queue corruption: reset it\n");
1016
1017 spin_lock_irqsave(&bp->lock, flags);
1018
1019 ctrl = macb_readl(bp, NCR);
1020 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
1021
1022 macb_init_rx_ring(bp);
1023 macb_writel(bp, RBQP, bp->rx_ring_dma);
1024
1025 macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
1026
1027 spin_unlock_irqrestore(&bp->lock, flags);
1028 return received;
1029 }
1030
986 if (first_frag != -1) 1031 if (first_frag != -1)
987 bp->rx_tail = first_frag; 1032 bp->rx_tail = first_frag;
988 else 1033 else
@@ -1003,7 +1048,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
1003 work_done = 0; 1048 work_done = 0;
1004 1049
1005 netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n", 1050 netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
1006 (unsigned long)status, budget); 1051 (unsigned long)status, budget);
1007 1052
1008 work_done = bp->macbgem_ops.mog_rx(bp, budget); 1053 work_done = bp->macbgem_ops.mog_rx(bp, budget);
1009 if (work_done < budget) { 1054 if (work_done < budget) {
@@ -1053,8 +1098,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
1053 (unsigned long)status); 1098 (unsigned long)status);
1054 1099
1055 if (status & MACB_RX_INT_FLAGS) { 1100 if (status & MACB_RX_INT_FLAGS) {
1056 /* 1101 /* There's no point taking any more interrupts
1057 * There's no point taking any more interrupts
1058 * until we have processed the buffers. The 1102 * until we have processed the buffers. The
1059 * scheduling call may fail if the poll routine 1103 * scheduling call may fail if the poll routine
1060 * is already scheduled, so disable interrupts 1104 * is already scheduled, so disable interrupts
@@ -1083,8 +1127,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
1083 if (status & MACB_BIT(TCOMP)) 1127 if (status & MACB_BIT(TCOMP))
1084 macb_tx_interrupt(queue); 1128 macb_tx_interrupt(queue);
1085 1129
1086 /* 1130 /* Link change detection isn't possible with RMII, so we'll
1087 * Link change detection isn't possible with RMII, so we'll
1088 * add that if/when we get our hands on a full-blown MII PHY. 1131 * add that if/when we get our hands on a full-blown MII PHY.
1089 */ 1132 */
1090 1133
@@ -1100,7 +1143,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
1100 macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); 1143 macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
1101 1144
1102 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1145 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1103 macb_writel(bp, ISR, MACB_BIT(RXUBR)); 1146 queue_writel(queue, ISR, MACB_BIT(RXUBR));
1104 } 1147 }
1105 1148
1106 if (status & MACB_BIT(ISR_ROVR)) { 1149 if (status & MACB_BIT(ISR_ROVR)) {
@@ -1115,8 +1158,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
1115 } 1158 }
1116 1159
1117 if (status & MACB_BIT(HRESP)) { 1160 if (status & MACB_BIT(HRESP)) {
1118 /* 1161 /* TODO: Reset the hardware, and maybe move the
1119 * TODO: Reset the hardware, and maybe move the
1120 * netdev_err to a lower-priority context as well 1162 * netdev_err to a lower-priority context as well
1121 * (work queue?) 1163 * (work queue?)
1122 */ 1164 */
@@ -1135,8 +1177,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
1135} 1177}
1136 1178
1137#ifdef CONFIG_NET_POLL_CONTROLLER 1179#ifdef CONFIG_NET_POLL_CONTROLLER
1138/* 1180/* Polling receive - used by netconsole and other diagnostic tools
1139 * Polling receive - used by netconsole and other diagnostic tools
1140 * to allow network i/o with interrupts disabled. 1181 * to allow network i/o with interrupts disabled.
1141 */ 1182 */
1142static void macb_poll_controller(struct net_device *dev) 1183static void macb_poll_controller(struct net_device *dev)
@@ -1222,7 +1263,7 @@ static unsigned int macb_tx_map(struct macb *bp,
1222 } 1263 }
1223 1264
1224 /* Should never happen */ 1265 /* Should never happen */
1225 if (unlikely(tx_skb == NULL)) { 1266 if (unlikely(!tx_skb)) {
1226 netdev_err(bp->dev, "BUG! empty skb!\n"); 1267 netdev_err(bp->dev, "BUG! empty skb!\n");
1227 return 0; 1268 return 0;
1228 } 1269 }
@@ -1292,16 +1333,16 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
1292 1333
1293#if defined(DEBUG) && defined(VERBOSE_DEBUG) 1334#if defined(DEBUG) && defined(VERBOSE_DEBUG)
1294 netdev_vdbg(bp->dev, 1335 netdev_vdbg(bp->dev,
1295 "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n", 1336 "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
1296 queue_index, skb->len, skb->head, skb->data, 1337 queue_index, skb->len, skb->head, skb->data,
1297 skb_tail_pointer(skb), skb_end_pointer(skb)); 1338 skb_tail_pointer(skb), skb_end_pointer(skb));
1298 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1, 1339 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
1299 skb->data, 16, true); 1340 skb->data, 16, true);
1300#endif 1341#endif
1301 1342
1302 /* Count how many TX buffer descriptors are needed to send this 1343 /* Count how many TX buffer descriptors are needed to send this
1303 * socket buffer: skb fragments of jumbo frames may need to be 1344 * socket buffer: skb fragments of jumbo frames may need to be
1304 * splitted into many buffer descriptors. 1345 * split into many buffer descriptors.
1305 */ 1346 */
1306 count = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length); 1347 count = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
1307 nr_frags = skb_shinfo(skb)->nr_frags; 1348 nr_frags = skb_shinfo(skb)->nr_frags;
@@ -1352,8 +1393,8 @@ static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
1352 1393
1353 if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) { 1394 if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
1354 netdev_dbg(bp->dev, 1395 netdev_dbg(bp->dev,
1355 "RX buffer must be multiple of %d bytes, expanding\n", 1396 "RX buffer must be multiple of %d bytes, expanding\n",
1356 RX_BUFFER_MULTIPLE); 1397 RX_BUFFER_MULTIPLE);
1357 bp->rx_buffer_size = 1398 bp->rx_buffer_size =
1358 roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE); 1399 roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
1359 } 1400 }
@@ -1376,7 +1417,7 @@ static void gem_free_rx_buffers(struct macb *bp)
1376 for (i = 0; i < RX_RING_SIZE; i++) { 1417 for (i = 0; i < RX_RING_SIZE; i++) {
1377 skb = bp->rx_skbuff[i]; 1418 skb = bp->rx_skbuff[i];
1378 1419
1379 if (skb == NULL) 1420 if (!skb)
1380 continue; 1421 continue;
1381 1422
1382 desc = &bp->rx_ring[i]; 1423 desc = &bp->rx_ring[i];
@@ -1432,10 +1473,10 @@ static int gem_alloc_rx_buffers(struct macb *bp)
1432 bp->rx_skbuff = kzalloc(size, GFP_KERNEL); 1473 bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
1433 if (!bp->rx_skbuff) 1474 if (!bp->rx_skbuff)
1434 return -ENOMEM; 1475 return -ENOMEM;
1435 else 1476
1436 netdev_dbg(bp->dev, 1477 netdev_dbg(bp->dev,
1437 "Allocated %d RX struct sk_buff entries at %p\n", 1478 "Allocated %d RX struct sk_buff entries at %p\n",
1438 RX_RING_SIZE, bp->rx_skbuff); 1479 RX_RING_SIZE, bp->rx_skbuff);
1439 return 0; 1480 return 0;
1440} 1481}
1441 1482
@@ -1448,10 +1489,10 @@ static int macb_alloc_rx_buffers(struct macb *bp)
1448 &bp->rx_buffers_dma, GFP_KERNEL); 1489 &bp->rx_buffers_dma, GFP_KERNEL);
1449 if (!bp->rx_buffers) 1490 if (!bp->rx_buffers)
1450 return -ENOMEM; 1491 return -ENOMEM;
1451 else 1492
1452 netdev_dbg(bp->dev, 1493 netdev_dbg(bp->dev,
1453 "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n", 1494 "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
1454 size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers); 1495 size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
1455 return 0; 1496 return 0;
1456} 1497}
1457 1498
@@ -1523,15 +1564,8 @@ static void gem_init_rings(struct macb *bp)
1523static void macb_init_rings(struct macb *bp) 1564static void macb_init_rings(struct macb *bp)
1524{ 1565{
1525 int i; 1566 int i;
1526 dma_addr_t addr;
1527 1567
1528 addr = bp->rx_buffers_dma; 1568 macb_init_rx_ring(bp);
1529 for (i = 0; i < RX_RING_SIZE; i++) {
1530 bp->rx_ring[i].addr = addr;
1531 bp->rx_ring[i].ctrl = 0;
1532 addr += bp->rx_buffer_size;
1533 }
1534 bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
1535 1569
1536 for (i = 0; i < TX_RING_SIZE; i++) { 1570 for (i = 0; i < TX_RING_SIZE; i++) {
1537 bp->queues[0].tx_ring[i].addr = 0; 1571 bp->queues[0].tx_ring[i].addr = 0;
@@ -1549,8 +1583,7 @@ static void macb_reset_hw(struct macb *bp)
1549 struct macb_queue *queue; 1583 struct macb_queue *queue;
1550 unsigned int q; 1584 unsigned int q;
1551 1585
1552 /* 1586 /* Disable RX and TX (XXX: Should we halt the transmission
1553 * Disable RX and TX (XXX: Should we halt the transmission
1554 * more gracefully?) 1587 * more gracefully?)
1555 */ 1588 */
1556 macb_writel(bp, NCR, 0); 1589 macb_writel(bp, NCR, 0);
@@ -1613,8 +1646,7 @@ static u32 macb_mdc_clk_div(struct macb *bp)
1613 return config; 1646 return config;
1614} 1647}
1615 1648
1616/* 1649/* Get the DMA bus width field of the network configuration register that we
1617 * Get the DMA bus width field of the network configuration register that we
1618 * should program. We find the width from decoding the design configuration 1650 * should program. We find the width from decoding the design configuration
1619 * register to find the maximum supported data bus width. 1651 * register to find the maximum supported data bus width.
1620 */ 1652 */
@@ -1634,8 +1666,7 @@ static u32 macb_dbw(struct macb *bp)
1634 } 1666 }
1635} 1667}
1636 1668
1637/* 1669/* Configure the receive DMA engine
1638 * Configure the receive DMA engine
1639 * - use the correct receive buffer size 1670 * - use the correct receive buffer size
1640 * - set best burst length for DMA operations 1671 * - set best burst length for DMA operations
1641 * (if not supported by FIFO, it will fallback to default) 1672 * (if not supported by FIFO, it will fallback to default)
@@ -1723,8 +1754,7 @@ static void macb_init_hw(struct macb *bp)
1723 macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE)); 1754 macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
1724} 1755}
1725 1756
1726/* 1757/* The hash address register is 64 bits long and takes up two
1727 * The hash address register is 64 bits long and takes up two
1728 * locations in the memory map. The least significant bits are stored 1758 * locations in the memory map. The least significant bits are stored
1729 * in EMAC_HSL and the most significant bits in EMAC_HSH. 1759 * in EMAC_HSL and the most significant bits in EMAC_HSH.
1730 * 1760 *
@@ -1764,9 +1794,7 @@ static inline int hash_bit_value(int bitnr, __u8 *addr)
1764 return 0; 1794 return 0;
1765} 1795}
1766 1796
1767/* 1797/* Return the hash index value for the specified address. */
1768 * Return the hash index value for the specified address.
1769 */
1770static int hash_get_index(__u8 *addr) 1798static int hash_get_index(__u8 *addr)
1771{ 1799{
1772 int i, j, bitval; 1800 int i, j, bitval;
@@ -1782,9 +1810,7 @@ static int hash_get_index(__u8 *addr)
1782 return hash_index; 1810 return hash_index;
1783} 1811}
1784 1812
1785/* 1813/* Add multicast addresses to the internal multicast-hash table. */
1786 * Add multicast addresses to the internal multicast-hash table.
1787 */
1788static void macb_sethashtable(struct net_device *dev) 1814static void macb_sethashtable(struct net_device *dev)
1789{ 1815{
1790 struct netdev_hw_addr *ha; 1816 struct netdev_hw_addr *ha;
@@ -1792,7 +1818,8 @@ static void macb_sethashtable(struct net_device *dev)
1792 unsigned int bitnr; 1818 unsigned int bitnr;
1793 struct macb *bp = netdev_priv(dev); 1819 struct macb *bp = netdev_priv(dev);
1794 1820
1795 mc_filter[0] = mc_filter[1] = 0; 1821 mc_filter[0] = 0;
1822 mc_filter[1] = 0;
1796 1823
1797 netdev_for_each_mc_addr(ha, dev) { 1824 netdev_for_each_mc_addr(ha, dev) {
1798 bitnr = hash_get_index(ha->addr); 1825 bitnr = hash_get_index(ha->addr);
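Editor's note: hash_get_index(), whose body falls outside these hunks, reduces the destination MAC address to a 6-bit position in the 64-bit hash register written just below. A hedged sketch of the usual MACB/GEM hash, in which bit i of the index is the XOR of every sixth address bit starting at bit i; treat the exact bit ordering as an assumption, not something this diff shows:

        /* Sketch only: 6-bit multicast hash index over a 48-bit MAC address. */
        static int example_hash_get_index(const unsigned char *addr)
        {
                int i, j, hash_index = 0;

                for (i = 0; i < 6; i++) {
                        int bitval = 0;

                        for (j = 0; j < 8; j++) {
                                int bitnr = i + j * 6;  /* bits i, i+6, ..., i+42 */

                                bitval ^= (addr[bitnr / 8] >> (bitnr % 8)) & 1;
                        }
                        hash_index |= bitval << i;
                }

                return hash_index;  /* 0..63: which bit of the hash register to set */
        }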
@@ -1803,9 +1830,7 @@ static void macb_sethashtable(struct net_device *dev)
1803 macb_or_gem_writel(bp, HRT, mc_filter[1]); 1830 macb_or_gem_writel(bp, HRT, mc_filter[1]);
1804} 1831}
1805 1832
1806/* 1833/* Enable/Disable promiscuous and multicast modes. */
1807 * Enable/Disable promiscuous and multicast modes.
1808 */
1809static void macb_set_rx_mode(struct net_device *dev) 1834static void macb_set_rx_mode(struct net_device *dev)
1810{ 1835{
1811 unsigned long cfg; 1836 unsigned long cfg;
@@ -2122,9 +2147,8 @@ static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
2122 2147
2123 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) 2148 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
2124 regs_buff[12] = macb_or_gem_readl(bp, USRIO); 2149 regs_buff[12] = macb_or_gem_readl(bp, USRIO);
2125 if (macb_is_gem(bp)) { 2150 if (macb_is_gem(bp))
2126 regs_buff[13] = gem_readl(bp, DMACFG); 2151 regs_buff[13] = gem_readl(bp, DMACFG);
2127 }
2128} 2152}
2129 2153
2130static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 2154static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
@@ -2247,11 +2271,11 @@ static const struct net_device_ops macb_netdev_ops = {
2247 .ndo_set_features = macb_set_features, 2271 .ndo_set_features = macb_set_features,
2248}; 2272};
2249 2273
2250/* 2274/* Configure peripheral capabilities according to device tree
2251 * Configure peripheral capabilities according to device tree
2252 * and integration options used 2275 * and integration options used
2253 */ 2276 */
2254static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_conf) 2277static void macb_configure_caps(struct macb *bp,
2278 const struct macb_config *dt_conf)
2255{ 2279{
2256 u32 dcfg; 2280 u32 dcfg;
2257 2281
@@ -2949,7 +2973,7 @@ static int macb_probe(struct platform_device *pdev)
2949 2973
2950 mac = of_get_mac_address(np); 2974 mac = of_get_mac_address(np);
2951 if (mac) 2975 if (mac)
2952 memcpy(bp->dev->dev_addr, mac, ETH_ALEN); 2976 ether_addr_copy(bp->dev->dev_addr, mac);
2953 else 2977 else
2954 macb_get_hwaddr(bp); 2978 macb_get_hwaddr(bp);
2955 2979
@@ -2957,9 +2981,11 @@ static int macb_probe(struct platform_device *pdev)
2957 phy_node = of_get_next_available_child(np, NULL); 2981 phy_node = of_get_next_available_child(np, NULL);
2958 if (phy_node) { 2982 if (phy_node) {
2959 int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0); 2983 int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0);
2960 if (gpio_is_valid(gpio)) 2984
2985 if (gpio_is_valid(gpio)) {
2961 bp->reset_gpio = gpio_to_desc(gpio); 2986 bp->reset_gpio = gpio_to_desc(gpio);
2962 gpiod_set_value(bp->reset_gpio, GPIOD_OUT_HIGH); 2987 gpiod_direction_output(bp->reset_gpio, 1);
2988 }
2963 } 2989 }
2964 of_node_put(phy_node); 2990 of_node_put(phy_node);
2965 2991
@@ -3029,7 +3055,8 @@ static int macb_remove(struct platform_device *pdev)
3029 mdiobus_free(bp->mii_bus); 3055 mdiobus_free(bp->mii_bus);
3030 3056
3031 /* Shutdown the PHY if there is a GPIO reset */ 3057 /* Shutdown the PHY if there is a GPIO reset */
3032 gpiod_set_value(bp->reset_gpio, GPIOD_OUT_LOW); 3058 if (bp->reset_gpio)
3059 gpiod_set_value(bp->reset_gpio, 0);
3033 3060
3034 unregister_netdev(dev); 3061 unregister_netdev(dev);
3035 clk_disable_unprepare(bp->tx_clk); 3062 clk_disable_unprepare(bp->tx_clk);
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index 4d187f22c48b..4686a85a8a22 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -96,6 +96,17 @@ config CHELSIO_T4_DCB
96 96
97 If unsure, say N. 97 If unsure, say N.
98 98
99config CHELSIO_T4_UWIRE
100 bool "Unified Wire Support for Chelsio T5 cards"
101 default n
102 depends on CHELSIO_T4
103 ---help---
104 Enable unified-wire offload features.
105 Say Y here if you want to enable unified-wire over Ethernet
106 in the driver.
107
108 If unsure, say N.
109
99config CHELSIO_T4_FCOE 110config CHELSIO_T4_FCOE
100 bool "Fibre Channel over Ethernet (FCoE) Support for Chelsio T5 cards" 111 bool "Fibre Channel over Ethernet (FCoE) Support for Chelsio T5 cards"
101 default n 112 default n
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
index ace0ab98d0f1..85c92821b239 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -7,4 +7,5 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
7cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o 7cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o
8cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o 8cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
9cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o 9cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o
10cxgb4-$(CONFIG_CHELSIO_T4_UWIRE) += cxgb4_ppm.o
10cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o 11cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 1dac6c6111bf..984a3cc26f86 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -404,6 +404,9 @@ enum {
404 MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */ 404 MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */
405 MAX_RDMA_QUEUES = NCHAN, /* # of streaming RDMA Rx queues */ 405 MAX_RDMA_QUEUES = NCHAN, /* # of streaming RDMA Rx queues */
406 MAX_RDMA_CIQS = 32, /* # of RDMA concentrator IQs */ 406 MAX_RDMA_CIQS = 32, /* # of RDMA concentrator IQs */
407
408 /* # of streaming iSCSIT Rx queues */
409 MAX_ISCSIT_QUEUES = MAX_OFLD_QSETS,
407}; 410};
408 411
409enum { 412enum {
@@ -420,8 +423,8 @@ enum {
420enum { 423enum {
421 INGQ_EXTRAS = 2, /* firmware event queue and */ 424 INGQ_EXTRAS = 2, /* firmware event queue and */
422 /* forwarded interrupts */ 425 /* forwarded interrupts */
423 MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES 426 MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES +
424 + MAX_RDMA_CIQS + INGQ_EXTRAS, 427 MAX_RDMA_CIQS + MAX_ISCSIT_QUEUES + INGQ_EXTRAS,
425}; 428};
426 429
427struct adapter; 430struct adapter;
@@ -508,6 +511,15 @@ struct pkt_gl {
508 511
509typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp, 512typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp,
510 const struct pkt_gl *gl); 513 const struct pkt_gl *gl);
514typedef void (*rspq_flush_handler_t)(struct sge_rspq *q);
515/* LRO related declarations for ULD */
516struct t4_lro_mgr {
517#define MAX_LRO_SESSIONS 64
518 u8 lro_session_cnt; /* # of sessions to aggregate */
519 unsigned long lro_pkts; /* # of LRO super packets */
520 unsigned long lro_merged; /* # of wire packets merged by LRO */
521 struct sk_buff_head lroq; /* list of aggregated sessions */
522};
511 523
512struct sge_rspq { /* state for an SGE response queue */ 524struct sge_rspq { /* state for an SGE response queue */
513 struct napi_struct napi; 525 struct napi_struct napi;
@@ -532,6 +544,8 @@ struct sge_rspq { /* state for an SGE response queue */
532 struct adapter *adap; 544 struct adapter *adap;
533 struct net_device *netdev; /* associated net device */ 545 struct net_device *netdev; /* associated net device */
534 rspq_handler_t handler; 546 rspq_handler_t handler;
547 rspq_flush_handler_t flush_handler;
548 struct t4_lro_mgr lro_mgr;
535#ifdef CONFIG_NET_RX_BUSY_POLL 549#ifdef CONFIG_NET_RX_BUSY_POLL
536#define CXGB_POLL_STATE_IDLE 0 550#define CXGB_POLL_STATE_IDLE 0
537#define CXGB_POLL_STATE_NAPI BIT(0) /* NAPI owns this poll */ 551#define CXGB_POLL_STATE_NAPI BIT(0) /* NAPI owns this poll */
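Editor's note: the new flush_handler and per-queue t4_lro_mgr added above let an upper-layer driver park partially aggregated LRO sessions on lroq and have them pushed up when the response queue is drained. A hedged sketch of what such a ULD flush callback might look like; the name and the counter handling are invented, not taken from this patch:

        /* Hypothetical ULD-side flush: hand any queued aggregates back to the stack. */
        static void example_uld_lro_flush(struct t4_lro_mgr *lro_mgr)
        {
                struct sk_buff *skb;

                while ((skb = skb_dequeue(&lro_mgr->lroq)) != NULL) {
                        lro_mgr->lro_pkts++;        /* illustrative accounting only */
                        netif_receive_skb(skb);
                }
                lro_mgr->lro_session_cnt = 0;
        }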
@@ -641,6 +655,7 @@ struct sge {
641 655
642 struct sge_eth_rxq ethrxq[MAX_ETH_QSETS]; 656 struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
643 struct sge_ofld_rxq iscsirxq[MAX_OFLD_QSETS]; 657 struct sge_ofld_rxq iscsirxq[MAX_OFLD_QSETS];
658 struct sge_ofld_rxq iscsitrxq[MAX_ISCSIT_QUEUES];
644 struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES]; 659 struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
645 struct sge_ofld_rxq rdmaciq[MAX_RDMA_CIQS]; 660 struct sge_ofld_rxq rdmaciq[MAX_RDMA_CIQS];
646 struct sge_rspq fw_evtq ____cacheline_aligned_in_smp; 661 struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
@@ -652,9 +667,11 @@ struct sge {
652 u16 ethqsets; /* # of active Ethernet queue sets */ 667 u16 ethqsets; /* # of active Ethernet queue sets */
653 u16 ethtxq_rover; /* Tx queue to clean up next */ 668 u16 ethtxq_rover; /* Tx queue to clean up next */
654 u16 iscsiqsets; /* # of active iSCSI queue sets */ 669 u16 iscsiqsets; /* # of active iSCSI queue sets */
670 u16 niscsitq; /* # of available iSCST Rx queues */
655 u16 rdmaqs; /* # of available RDMA Rx queues */ 671 u16 rdmaqs; /* # of available RDMA Rx queues */
656 u16 rdmaciqs; /* # of available RDMA concentrator IQs */ 672 u16 rdmaciqs; /* # of available RDMA concentrator IQs */
657 u16 iscsi_rxq[MAX_OFLD_QSETS]; 673 u16 iscsi_rxq[MAX_OFLD_QSETS];
674 u16 iscsit_rxq[MAX_ISCSIT_QUEUES];
658 u16 rdma_rxq[MAX_RDMA_QUEUES]; 675 u16 rdma_rxq[MAX_RDMA_QUEUES];
659 u16 rdma_ciq[MAX_RDMA_CIQS]; 676 u16 rdma_ciq[MAX_RDMA_CIQS];
660 u16 timer_val[SGE_NTIMERS]; 677 u16 timer_val[SGE_NTIMERS];
@@ -681,6 +698,7 @@ struct sge {
681 698
682#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++) 699#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
683#define for_each_iscsirxq(sge, i) for (i = 0; i < (sge)->iscsiqsets; i++) 700#define for_each_iscsirxq(sge, i) for (i = 0; i < (sge)->iscsiqsets; i++)
701#define for_each_iscsitrxq(sge, i) for (i = 0; i < (sge)->niscsitq; i++)
684#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++) 702#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)
685#define for_each_rdmaciq(sge, i) for (i = 0; i < (sge)->rdmaciqs; i++) 703#define for_each_rdmaciq(sge, i) for (i = 0; i < (sge)->rdmaciqs; i++)
686 704
@@ -747,6 +765,8 @@ struct adapter {
747 struct list_head rcu_node; 765 struct list_head rcu_node;
748 struct list_head mac_hlist; /* list of MAC addresses in MPS Hash */ 766 struct list_head mac_hlist; /* list of MAC addresses in MPS Hash */
749 767
768 void *iscsi_ppm;
769
750 struct tid_info tids; 770 struct tid_info tids;
751 void **tid_release_head; 771 void **tid_release_head;
752 spinlock_t tid_release_lock; 772 spinlock_t tid_release_lock;
@@ -1113,7 +1133,8 @@ int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
1113int t4_ofld_send(struct adapter *adap, struct sk_buff *skb); 1133int t4_ofld_send(struct adapter *adap, struct sk_buff *skb);
1114int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, 1134int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
1115 struct net_device *dev, int intr_idx, 1135 struct net_device *dev, int intr_idx,
1116 struct sge_fl *fl, rspq_handler_t hnd, int cong); 1136 struct sge_fl *fl, rspq_handler_t hnd,
1137 rspq_flush_handler_t flush_handler, int cong);
1117int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, 1138int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
1118 struct net_device *dev, struct netdev_queue *netdevq, 1139 struct net_device *dev, struct netdev_queue *netdevq,
1119 unsigned int iqid); 1140 unsigned int iqid);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index e6a4072b494b..0bb41e9b9b1c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2334,12 +2334,14 @@ static int sge_qinfo_show(struct seq_file *seq, void *v)
2334 struct adapter *adap = seq->private; 2334 struct adapter *adap = seq->private;
2335 int eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4); 2335 int eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
2336 int iscsi_entries = DIV_ROUND_UP(adap->sge.iscsiqsets, 4); 2336 int iscsi_entries = DIV_ROUND_UP(adap->sge.iscsiqsets, 4);
2337 int iscsit_entries = DIV_ROUND_UP(adap->sge.niscsitq, 4);
2337 int rdma_entries = DIV_ROUND_UP(adap->sge.rdmaqs, 4); 2338 int rdma_entries = DIV_ROUND_UP(adap->sge.rdmaqs, 4);
2338 int ciq_entries = DIV_ROUND_UP(adap->sge.rdmaciqs, 4); 2339 int ciq_entries = DIV_ROUND_UP(adap->sge.rdmaciqs, 4);
2339 int ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4); 2340 int ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
2340 int i, r = (uintptr_t)v - 1; 2341 int i, r = (uintptr_t)v - 1;
2341 int iscsi_idx = r - eth_entries; 2342 int iscsi_idx = r - eth_entries;
2342 int rdma_idx = iscsi_idx - iscsi_entries; 2343 int iscsit_idx = iscsi_idx - iscsi_entries;
2344 int rdma_idx = iscsit_idx - iscsit_entries;
2343 int ciq_idx = rdma_idx - rdma_entries; 2345 int ciq_idx = rdma_idx - rdma_entries;
2344 int ctrl_idx = ciq_idx - ciq_entries; 2346 int ctrl_idx = ciq_idx - ciq_entries;
2345 int fq_idx = ctrl_idx - ctrl_entries; 2347 int fq_idx = ctrl_idx - ctrl_entries;
@@ -2453,6 +2455,35 @@ do { \
2453 RL("FLLow:", fl.low); 2455 RL("FLLow:", fl.low);
2454 RL("FLStarving:", fl.starving); 2456 RL("FLStarving:", fl.starving);
2455 2457
2458 } else if (iscsit_idx < iscsit_entries) {
2459 const struct sge_ofld_rxq *rx =
2460 &adap->sge.iscsitrxq[iscsit_idx * 4];
2461 int n = min(4, adap->sge.niscsitq - 4 * iscsit_idx);
2462
2463 S("QType:", "iSCSIT");
2464 R("RspQ ID:", rspq.abs_id);
2465 R("RspQ size:", rspq.size);
2466 R("RspQE size:", rspq.iqe_len);
2467 R("RspQ CIDX:", rspq.cidx);
2468 R("RspQ Gen:", rspq.gen);
2469 S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
2470 S3("u", "Intr pktcnt:",
2471 adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
2472 R("FL ID:", fl.cntxt_id);
2473 R("FL size:", fl.size - 8);
2474 R("FL pend:", fl.pend_cred);
2475 R("FL avail:", fl.avail);
2476 R("FL PIDX:", fl.pidx);
2477 R("FL CIDX:", fl.cidx);
2478 RL("RxPackets:", stats.pkts);
2479 RL("RxImmPkts:", stats.imm);
2480 RL("RxNoMem:", stats.nomem);
2481 RL("FLAllocErr:", fl.alloc_failed);
2482 RL("FLLrgAlcErr:", fl.large_alloc_failed);
2483 RL("FLMapErr:", fl.mapping_err);
2484 RL("FLLow:", fl.low);
2485 RL("FLStarving:", fl.starving);
2486
2456 } else if (rdma_idx < rdma_entries) { 2487 } else if (rdma_idx < rdma_entries) {
2457 const struct sge_ofld_rxq *rx = 2488 const struct sge_ofld_rxq *rx =
2458 &adap->sge.rdmarxq[rdma_idx * 4]; 2489 &adap->sge.rdmarxq[rdma_idx * 4];
@@ -2543,6 +2574,7 @@ static int sge_queue_entries(const struct adapter *adap)
2543{ 2574{
2544 return DIV_ROUND_UP(adap->sge.ethqsets, 4) + 2575 return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
2545 DIV_ROUND_UP(adap->sge.iscsiqsets, 4) + 2576 DIV_ROUND_UP(adap->sge.iscsiqsets, 4) +
2577 DIV_ROUND_UP(adap->sge.niscsitq, 4) +
2546 DIV_ROUND_UP(adap->sge.rdmaqs, 4) + 2578 DIV_ROUND_UP(adap->sge.rdmaqs, 4) +
2547 DIV_ROUND_UP(adap->sge.rdmaciqs, 4) + 2579 DIV_ROUND_UP(adap->sge.rdmaciqs, 4) +
2548 DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1; 2580 DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1;
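Editor's note: the chained subtractions in sge_qinfo_show() walk the debugfs row index through the queue groups in a fixed order (ethernet, iSCSI, the new iSCSIT block, RDMA, CIQ, control, firmware event), four queues per row. A small standalone illustration with invented queue counts:

        #include <stdio.h>

        #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

        int main(void)
        {
                /* hypothetical adapter: 8 ethernet qsets, 4 iSCSI, 4 iSCSIT */
                int eth_entries    = DIV_ROUND_UP(8, 4);  /* rows 0-1 */
                int iscsi_entries  = DIV_ROUND_UP(4, 4);  /* row 2    */
                int iscsit_entries = DIV_ROUND_UP(4, 4);  /* row 3    */
                int r;

                for (r = 0; r < 5; r++) {
                        int iscsi_idx  = r - eth_entries;
                        int iscsit_idx = iscsi_idx - iscsi_entries;

                        if (r < eth_entries)
                                printf("row %d -> ethernet queues\n", r);
                        else if (iscsi_idx < iscsi_entries)
                                printf("row %d -> iSCSI queues\n", r);
                        else if (iscsit_idx < iscsit_entries)
                                printf("row %d -> iSCSIT queues\n", r);
                        else
                                printf("row %d -> RDMA and later groups\n", r);
                }
                return 0;
        }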
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index adad73f7c8cd..a1e329ec24cd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -168,7 +168,8 @@ MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter,"
168static int dflt_msg_enable = DFLT_MSG_ENABLE; 168static int dflt_msg_enable = DFLT_MSG_ENABLE;
169 169
170module_param(dflt_msg_enable, int, 0644); 170module_param(dflt_msg_enable, int, 0644);
171MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap"); 171MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap, "
172 "deprecated parameter");
172 173
173/* 174/*
174 * The driver uses the best interrupt scheme available on a platform in the 175 * The driver uses the best interrupt scheme available on a platform in the
@@ -227,7 +228,7 @@ static DEFINE_MUTEX(uld_mutex);
227static LIST_HEAD(adap_rcu_list); 228static LIST_HEAD(adap_rcu_list);
228static DEFINE_SPINLOCK(adap_rcu_lock); 229static DEFINE_SPINLOCK(adap_rcu_lock);
229static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX]; 230static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
230static const char *uld_str[] = { "RDMA", "iSCSI" }; 231static const char *const uld_str[] = { "RDMA", "iSCSI", "iSCSIT" };
231 232
232static void link_report(struct net_device *dev) 233static void link_report(struct net_device *dev)
233{ 234{
@@ -664,6 +665,13 @@ out:
664 return 0; 665 return 0;
665} 666}
666 667
668/* Flush the aggregated lro sessions */
669static void uldrx_flush_handler(struct sge_rspq *q)
670{
671 if (ulds[q->uld].lro_flush)
672 ulds[q->uld].lro_flush(&q->lro_mgr);
673}
674
667/** 675/**
668 * uldrx_handler - response queue handler for ULD queues 676 * uldrx_handler - response queue handler for ULD queues
669 * @q: the response queue that received the packet 677 * @q: the response queue that received the packet
@@ -677,6 +685,7 @@ static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
677 const struct pkt_gl *gl) 685 const struct pkt_gl *gl)
678{ 686{
679 struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq); 687 struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
688 int ret;
680 689
681 /* FW can send CPLs encapsulated in a CPL_FW4_MSG. 690 /* FW can send CPLs encapsulated in a CPL_FW4_MSG.
682 */ 691 */
@@ -684,10 +693,19 @@ static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
684 ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL) 693 ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
685 rsp += 2; 694 rsp += 2;
686 695
687 if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) { 696 if (q->flush_handler)
697 ret = ulds[q->uld].lro_rx_handler(q->adap->uld_handle[q->uld],
698 rsp, gl, &q->lro_mgr,
699 &q->napi);
700 else
701 ret = ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld],
702 rsp, gl);
703
704 if (ret) {
688 rxq->stats.nomem++; 705 rxq->stats.nomem++;
689 return -1; 706 return -1;
690 } 707 }
708
691 if (gl == NULL) 709 if (gl == NULL)
692 rxq->stats.imm++; 710 rxq->stats.imm++;
693 else if (gl == CXGB4_MSG_AN) 711 else if (gl == CXGB4_MSG_AN)
@@ -754,6 +772,10 @@ static void name_msix_vecs(struct adapter *adap)
754 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-iscsi%d", 772 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-iscsi%d",
755 adap->port[0]->name, i); 773 adap->port[0]->name, i);
756 774
775 for_each_iscsitrxq(&adap->sge, i)
776 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-iSCSIT%d",
777 adap->port[0]->name, i);
778
757 for_each_rdmarxq(&adap->sge, i) 779 for_each_rdmarxq(&adap->sge, i)
758 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d", 780 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
759 adap->port[0]->name, i); 781 adap->port[0]->name, i);
@@ -767,6 +789,7 @@ static int request_msix_queue_irqs(struct adapter *adap)
767{ 789{
768 struct sge *s = &adap->sge; 790 struct sge *s = &adap->sge;
769 int err, ethqidx, iscsiqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0; 791 int err, ethqidx, iscsiqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
792 int iscsitqidx = 0;
770 int msi_index = 2; 793 int msi_index = 2;
771 794
772 err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0, 795 err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
@@ -792,6 +815,15 @@ static int request_msix_queue_irqs(struct adapter *adap)
792 goto unwind; 815 goto unwind;
793 msi_index++; 816 msi_index++;
794 } 817 }
818 for_each_iscsitrxq(s, iscsitqidx) {
819 err = request_irq(adap->msix_info[msi_index].vec,
820 t4_sge_intr_msix, 0,
821 adap->msix_info[msi_index].desc,
822 &s->iscsitrxq[iscsitqidx].rspq);
823 if (err)
824 goto unwind;
825 msi_index++;
826 }
795 for_each_rdmarxq(s, rdmaqidx) { 827 for_each_rdmarxq(s, rdmaqidx) {
796 err = request_irq(adap->msix_info[msi_index].vec, 828 err = request_irq(adap->msix_info[msi_index].vec,
797 t4_sge_intr_msix, 0, 829 t4_sge_intr_msix, 0,
@@ -819,6 +851,9 @@ unwind:
819 while (--rdmaqidx >= 0) 851 while (--rdmaqidx >= 0)
820 free_irq(adap->msix_info[--msi_index].vec, 852 free_irq(adap->msix_info[--msi_index].vec,
821 &s->rdmarxq[rdmaqidx].rspq); 853 &s->rdmarxq[rdmaqidx].rspq);
854 while (--iscsitqidx >= 0)
855 free_irq(adap->msix_info[--msi_index].vec,
856 &s->iscsitrxq[iscsitqidx].rspq);
822 while (--iscsiqidx >= 0) 857 while (--iscsiqidx >= 0)
823 free_irq(adap->msix_info[--msi_index].vec, 858 free_irq(adap->msix_info[--msi_index].vec,
824 &s->iscsirxq[iscsiqidx].rspq); 859 &s->iscsirxq[iscsiqidx].rspq);
@@ -840,6 +875,9 @@ static void free_msix_queue_irqs(struct adapter *adap)
840 for_each_iscsirxq(s, i) 875 for_each_iscsirxq(s, i)
841 free_irq(adap->msix_info[msi_index++].vec, 876 free_irq(adap->msix_info[msi_index++].vec,
842 &s->iscsirxq[i].rspq); 877 &s->iscsirxq[i].rspq);
878 for_each_iscsitrxq(s, i)
879 free_irq(adap->msix_info[msi_index++].vec,
880 &s->iscsitrxq[i].rspq);
843 for_each_rdmarxq(s, i) 881 for_each_rdmarxq(s, i)
844 free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq); 882 free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
845 for_each_rdmaciq(s, i) 883 for_each_rdmaciq(s, i)
@@ -984,7 +1022,7 @@ static void enable_rx(struct adapter *adap)
984 1022
985static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q, 1023static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q,
986 unsigned int nq, unsigned int per_chan, int msi_idx, 1024 unsigned int nq, unsigned int per_chan, int msi_idx,
987 u16 *ids) 1025 u16 *ids, bool lro)
988{ 1026{
989 int i, err; 1027 int i, err;
990 1028
@@ -994,7 +1032,9 @@ static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q,
994 err = t4_sge_alloc_rxq(adap, &q->rspq, false, 1032 err = t4_sge_alloc_rxq(adap, &q->rspq, false,
995 adap->port[i / per_chan], 1033 adap->port[i / per_chan],
996 msi_idx, q->fl.size ? &q->fl : NULL, 1034 msi_idx, q->fl.size ? &q->fl : NULL,
997 uldrx_handler, 0); 1035 uldrx_handler,
1036 lro ? uldrx_flush_handler : NULL,
1037 0);
998 if (err) 1038 if (err)
999 return err; 1039 return err;
1000 memset(&q->stats, 0, sizeof(q->stats)); 1040 memset(&q->stats, 0, sizeof(q->stats));
@@ -1024,7 +1064,7 @@ static int setup_sge_queues(struct adapter *adap)
1024 msi_idx = 1; /* vector 0 is for non-queue interrupts */ 1064 msi_idx = 1; /* vector 0 is for non-queue interrupts */
1025 else { 1065 else {
1026 err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0, 1066 err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
1027 NULL, NULL, -1); 1067 NULL, NULL, NULL, -1);
1028 if (err) 1068 if (err)
1029 return err; 1069 return err;
1030 msi_idx = -((int)s->intrq.abs_id + 1); 1070 msi_idx = -((int)s->intrq.abs_id + 1);
@@ -1044,7 +1084,7 @@ static int setup_sge_queues(struct adapter *adap)
1044 * new/deleted queues. 1084 * new/deleted queues.
1045 */ 1085 */
1046 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0], 1086 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
1047 msi_idx, NULL, fwevtq_handler, -1); 1087 msi_idx, NULL, fwevtq_handler, NULL, -1);
1048 if (err) { 1088 if (err) {
1049freeout: t4_free_sge_resources(adap); 1089freeout: t4_free_sge_resources(adap);
1050 return err; 1090 return err;
@@ -1062,6 +1102,7 @@ freeout: t4_free_sge_resources(adap);
1062 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, 1102 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
1063 msi_idx, &q->fl, 1103 msi_idx, &q->fl,
1064 t4_ethrx_handler, 1104 t4_ethrx_handler,
1105 NULL,
1065 t4_get_mps_bg_map(adap, 1106 t4_get_mps_bg_map(adap,
1066 pi->tx_chan)); 1107 pi->tx_chan));
1067 if (err) 1108 if (err)
@@ -1087,18 +1128,19 @@ freeout: t4_free_sge_resources(adap);
1087 goto freeout; 1128 goto freeout;
1088 } 1129 }
1089 1130
1090#define ALLOC_OFLD_RXQS(firstq, nq, per_chan, ids) do { \ 1131#define ALLOC_OFLD_RXQS(firstq, nq, per_chan, ids, lro) do { \
1091 err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, msi_idx, ids); \ 1132 err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, msi_idx, ids, lro); \
1092 if (err) \ 1133 if (err) \
1093 goto freeout; \ 1134 goto freeout; \
1094 if (msi_idx > 0) \ 1135 if (msi_idx > 0) \
1095 msi_idx += nq; \ 1136 msi_idx += nq; \
1096} while (0) 1137} while (0)
1097 1138
1098 ALLOC_OFLD_RXQS(s->iscsirxq, s->iscsiqsets, j, s->iscsi_rxq); 1139 ALLOC_OFLD_RXQS(s->iscsirxq, s->iscsiqsets, j, s->iscsi_rxq, false);
1099 ALLOC_OFLD_RXQS(s->rdmarxq, s->rdmaqs, 1, s->rdma_rxq); 1140 ALLOC_OFLD_RXQS(s->iscsitrxq, s->niscsitq, j, s->iscsit_rxq, true);
1141 ALLOC_OFLD_RXQS(s->rdmarxq, s->rdmaqs, 1, s->rdma_rxq, false);
1100 j = s->rdmaciqs / adap->params.nports; /* rdmaq queues per channel */ 1142 j = s->rdmaciqs / adap->params.nports; /* rdmaq queues per channel */
1101 ALLOC_OFLD_RXQS(s->rdmaciq, s->rdmaciqs, j, s->rdma_ciq); 1143 ALLOC_OFLD_RXQS(s->rdmaciq, s->rdmaciqs, j, s->rdma_ciq, false);
1102 1144
1103#undef ALLOC_OFLD_RXQS 1145#undef ALLOC_OFLD_RXQS
1104 1146
@@ -2430,6 +2472,9 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
2430 } else if (uld == CXGB4_ULD_ISCSI) { 2472 } else if (uld == CXGB4_ULD_ISCSI) {
2431 lli.rxq_ids = adap->sge.iscsi_rxq; 2473 lli.rxq_ids = adap->sge.iscsi_rxq;
2432 lli.nrxq = adap->sge.iscsiqsets; 2474 lli.nrxq = adap->sge.iscsiqsets;
2475 } else if (uld == CXGB4_ULD_ISCSIT) {
2476 lli.rxq_ids = adap->sge.iscsit_rxq;
2477 lli.nrxq = adap->sge.niscsitq;
2433 } 2478 }
2434 lli.ntxq = adap->sge.iscsiqsets; 2479 lli.ntxq = adap->sge.iscsiqsets;
2435 lli.nchan = adap->params.nports; 2480 lli.nchan = adap->params.nports;
@@ -2437,6 +2482,10 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
2437 lli.wr_cred = adap->params.ofldq_wr_cred; 2482 lli.wr_cred = adap->params.ofldq_wr_cred;
2438 lli.adapter_type = adap->params.chip; 2483 lli.adapter_type = adap->params.chip;
2439 lli.iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A)); 2484 lli.iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
2485 lli.iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
2486 lli.iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
2487 lli.iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
2488 lli.iscsi_ppm = &adap->iscsi_ppm;
2440 lli.cclk_ps = 1000000000 / adap->params.vpd.cclk; 2489 lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
2441 lli.udb_density = 1 << adap->params.sge.eq_qpp; 2490 lli.udb_density = 1 << adap->params.sge.eq_qpp;
2442 lli.ucq_density = 1 << adap->params.sge.iq_qpp; 2491 lli.ucq_density = 1 << adap->params.sge.iq_qpp;
@@ -4336,6 +4385,9 @@ static void cfg_queues(struct adapter *adap)
4336 s->rdmaciqs = (s->rdmaciqs / adap->params.nports) * 4385 s->rdmaciqs = (s->rdmaciqs / adap->params.nports) *
4337 adap->params.nports; 4386 adap->params.nports;
4338 s->rdmaciqs = max_t(int, s->rdmaciqs, adap->params.nports); 4387 s->rdmaciqs = max_t(int, s->rdmaciqs, adap->params.nports);
4388
4389 if (!is_t4(adap->params.chip))
4390 s->niscsitq = s->iscsiqsets;
4339 } 4391 }
4340 4392
4341 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) { 4393 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
@@ -4362,6 +4414,16 @@ static void cfg_queues(struct adapter *adap)
4362 r->fl.size = 72; 4414 r->fl.size = 72;
4363 } 4415 }
4364 4416
4417 if (!is_t4(adap->params.chip)) {
4418 for (i = 0; i < ARRAY_SIZE(s->iscsitrxq); i++) {
4419 struct sge_ofld_rxq *r = &s->iscsitrxq[i];
4420
4421 init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
4422 r->rspq.uld = CXGB4_ULD_ISCSIT;
4423 r->fl.size = 72;
4424 }
4425 }
4426
4365 for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) { 4427 for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
4366 struct sge_ofld_rxq *r = &s->rdmarxq[i]; 4428 struct sge_ofld_rxq *r = &s->rdmarxq[i];
4367 4429
@@ -4436,9 +4498,13 @@ static int enable_msix(struct adapter *adap)
4436 4498
4437 want = s->max_ethqsets + EXTRA_VECS; 4499 want = s->max_ethqsets + EXTRA_VECS;
4438 if (is_offload(adap)) { 4500 if (is_offload(adap)) {
4439 want += s->rdmaqs + s->rdmaciqs + s->iscsiqsets; 4501 want += s->rdmaqs + s->rdmaciqs + s->iscsiqsets +
4502 s->niscsitq;
4440 /* need nchan for each possible ULD */ 4503 /* need nchan for each possible ULD */
4441 ofld_need = 3 * nchan; 4504 if (is_t4(adap->params.chip))
4505 ofld_need = 3 * nchan;
4506 else
4507 ofld_need = 4 * nchan;
4442 } 4508 }
4443#ifdef CONFIG_CHELSIO_T4_DCB 4509#ifdef CONFIG_CHELSIO_T4_DCB
4444 /* For Data Center Bridging we need 8 Ethernet TX Priority Queues for 4510 /* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
@@ -4470,12 +4536,16 @@ static int enable_msix(struct adapter *adap)
4470 if (allocated < want) { 4536 if (allocated < want) {
4471 s->rdmaqs = nchan; 4537 s->rdmaqs = nchan;
4472 s->rdmaciqs = nchan; 4538 s->rdmaciqs = nchan;
4539
4540 if (!is_t4(adap->params.chip))
4541 s->niscsitq = nchan;
4473 } 4542 }
4474 4543
4475 /* leftovers go to OFLD */ 4544 /* leftovers go to OFLD */
4476 i = allocated - EXTRA_VECS - s->max_ethqsets - 4545 i = allocated - EXTRA_VECS - s->max_ethqsets -
4477 s->rdmaqs - s->rdmaciqs; 4546 s->rdmaqs - s->rdmaciqs - s->niscsitq;
4478 s->iscsiqsets = (i / nchan) * nchan; /* round down */ 4547 s->iscsiqsets = (i / nchan) * nchan; /* round down */
4548
4479 } 4549 }
4480 for (i = 0; i < allocated; ++i) 4550 for (i = 0; i < allocated; ++i)
4481 adap->msix_info[i].vec = entries[i].vector; 4551 adap->msix_info[i].vec = entries[i].vector;
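Editor's note: enable_msix() now budgets one extra queue group on T5 and later; the vector request grows by niscsitq and the per-channel offload reserve goes from three ULD groups to four. A worked example with invented counts (EXTRA_VECS is assumed to be the two non-data vectors):

        #include <stdio.h>

        int main(void)
        {
                /* hypothetical 2-port T5 */
                int nchan = 2, extra_vecs = 2;
                int max_ethqsets = 16, iscsiqsets = 8, niscsitq = 8;
                int rdmaqs = 2, rdmaciqs = 2;

                int want = max_ethqsets + extra_vecs +
                           rdmaqs + rdmaciqs + iscsiqsets + niscsitq;  /* 38 */
                int ofld_need = 4 * nchan;  /* one group of nchan per ULD type */

                printf("ask for %d vectors, keep at least %d for offload queues\n",
                       want, ofld_need);
                return 0;
        }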
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.c
new file mode 100644
index 000000000000..d88a7a7b2400
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.c
@@ -0,0 +1,464 @@
1/*
2 * cxgb4_ppm.c: Chelsio common library for T4/T5 iSCSI PagePod Manager
3 *
4 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * Written by: Karen Xie (kxie@chelsio.com)
11 */
12
13#include <linux/kernel.h>
14#include <linux/version.h>
15#include <linux/module.h>
16#include <linux/errno.h>
17#include <linux/types.h>
18#include <linux/debugfs.h>
19#include <linux/export.h>
20#include <linux/list.h>
21#include <linux/skbuff.h>
22#include <linux/pci.h>
23#include <linux/scatterlist.h>
24
25#include "cxgb4_ppm.h"
26
27/* Direct Data Placement -
28 * Directly place the iSCSI Data-In or Data-Out PDU's payload into
29 * pre-posted final destination host-memory buffers based on the
30 * Initiator Task Tag (ITT) in Data-In or Target Task Tag (TTT)
31 * in Data-Out PDUs. The host memory address is programmed into
32 * h/w in the format of pagepod entries. The location of the
33 * pagepod entry is encoded into ddp tag which is used as the base
34 * for ITT/TTT.
35 */
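Editor's note: the encoding described above is produced by cxgbi_ppm_make_ddp_tag(), called near the end of this file; its body presumably lives in cxgb4_ppm.h and is outside this excerpt. Assuming the color field occupies the low PPOD_IDX_SHIFT bits (consistent with the color wrap test further down), the composition would look roughly like:

        /* Assumed encoding, for illustration only. */
        static inline u32 example_make_ddp_tag(unsigned int hwidx, unsigned char color)
        {
                return (hwidx << PPOD_IDX_SHIFT) | color;
        }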
36
37/* Direct-Data Placement page size adjustment
38 */
39int cxgbi_ppm_find_page_index(struct cxgbi_ppm *ppm, unsigned long pgsz)
40{
41 struct cxgbi_tag_format *tformat = &ppm->tformat;
42 int i;
43
44 for (i = 0; i < DDP_PGIDX_MAX; i++) {
45 if (pgsz == 1UL << (DDP_PGSZ_BASE_SHIFT +
46 tformat->pgsz_order[i])) {
47 pr_debug("%s: %s ppm, pgsz %lu -> idx %d.\n",
48 __func__, ppm->ndev->name, pgsz, i);
49 return i;
50 }
51 }
52 pr_info("ippm: ddp page size %lu not supported.\n", pgsz);
53 return DDP_PGIDX_MAX;
54}
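Editor's note: a hedged usage sketch for the lookup above (caller name invented, not part of this file): a driver would typically resolve the host page size to a pagepod page-size index once, and fall back to non-DDP I/O when the size is not programmed:

        static int example_resolve_pgidx(struct cxgbi_ppm *ppm)
        {
                int pgidx = cxgbi_ppm_find_page_index(ppm, PAGE_SIZE);

                if (pgidx >= DDP_PGIDX_MAX)  /* PAGE_SIZE not supported by the h/w setup */
                        return -EINVAL;
                return pgidx;
        }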
55
56/* DDP setup & teardown
57 */
58static int ppm_find_unused_entries(unsigned long *bmap,
59 unsigned int max_ppods,
60 unsigned int start,
61 unsigned int nr,
62 unsigned int align_mask)
63{
64 unsigned long i;
65
66 i = bitmap_find_next_zero_area(bmap, max_ppods, start, nr, align_mask);
67
68 if (unlikely(i >= max_ppods) && (start > nr))
69 i = bitmap_find_next_zero_area(bmap, max_ppods, 0, start - 1,
70 align_mask);
71 if (unlikely(i >= max_ppods))
72 return -ENOSPC;
73
74 bitmap_set(bmap, i, nr);
75 return (int)i;
76}
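Editor's note: ppm_find_unused_entries() above is a first-fit search over a bitmap that retries once from the start of the map before giving up. The same idiom in isolation, with invented names and without the alignment and restart-window details of the real function:

        #include <linux/bitmap.h>
        #include <linux/errno.h>

        static int example_bitmap_alloc(unsigned long *bmap, unsigned int max,
                                        unsigned int hint, unsigned int nr)
        {
                unsigned long i;

                i = bitmap_find_next_zero_area(bmap, max, hint, nr, 0);
                if (i >= max && hint)           /* wrap around and retry once */
                        i = bitmap_find_next_zero_area(bmap, max, 0, nr, 0);
                if (i >= max)
                        return -ENOSPC;

                bitmap_set(bmap, i, nr);        /* claim the run */
                return (int)i;
        }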
77
78static void ppm_mark_entries(struct cxgbi_ppm *ppm, int i, int count,
79 unsigned long caller_data)
80{
81 struct cxgbi_ppod_data *pdata = ppm->ppod_data + i;
82
83 pdata->caller_data = caller_data;
84 pdata->npods = count;
85
86 if (pdata->color == ((1 << PPOD_IDX_SHIFT) - 1))
87 pdata->color = 0;
88 else
89 pdata->color++;
90}
91
92static int ppm_get_cpu_entries(struct cxgbi_ppm *ppm, unsigned int count,
93 unsigned long caller_data)
94{
95 struct cxgbi_ppm_pool *pool;
96 unsigned int cpu;
97 int i;
98
99 cpu = get_cpu();
100 pool = per_cpu_ptr(ppm->pool, cpu);
101 spin_lock_bh(&pool->lock);
102 put_cpu();
103
104 i = ppm_find_unused_entries(pool->bmap, ppm->pool_index_max,
105 pool->next, count, 0);
106 if (i < 0) {
107 pool->next = 0;
108 spin_unlock_bh(&pool->lock);
109 return -ENOSPC;
110 }
111
112 pool->next = i + count;
113 if (pool->next >= ppm->pool_index_max)
114 pool->next = 0;
115
116 spin_unlock_bh(&pool->lock);
117
118 pr_debug("%s: cpu %u, idx %d + %d (%d), next %u.\n",
119 __func__, cpu, i, count, i + cpu * ppm->pool_index_max,
120 pool->next);
121
122 i += cpu * ppm->pool_index_max;
123 ppm_mark_entries(ppm, i, count, caller_data);
124
125 return i;
126}
127
128static int ppm_get_entries(struct cxgbi_ppm *ppm, unsigned int count,
129 unsigned long caller_data)
130{
131 int i;
132
133 spin_lock_bh(&ppm->map_lock);
134 i = ppm_find_unused_entries(ppm->ppod_bmap, ppm->bmap_index_max,
135 ppm->next, count, 0);
136 if (i < 0) {
137 ppm->next = 0;
138 spin_unlock_bh(&ppm->map_lock);
139 pr_debug("ippm: NO suitable entries %u available.\n",
140 count);
141 return -ENOSPC;
142 }
143
144 ppm->next = i + count;
145 if (ppm->next >= ppm->bmap_index_max)
146 ppm->next = 0;
147
148 spin_unlock_bh(&ppm->map_lock);
149
150 pr_debug("%s: idx %d + %d (%d), next %u, caller_data 0x%lx.\n",
151 __func__, i, count, i + ppm->pool_rsvd, ppm->next,
152 caller_data);
153
154 i += ppm->pool_rsvd;
155 ppm_mark_entries(ppm, i, count, caller_data);
156
157 return i;
158}
159
160static void ppm_unmark_entries(struct cxgbi_ppm *ppm, int i, int count)
161{
162 pr_debug("%s: idx %d + %d.\n", __func__, i, count);
163
164 if (i < ppm->pool_rsvd) {
165 unsigned int cpu;
166 struct cxgbi_ppm_pool *pool;
167
168 cpu = i / ppm->pool_index_max;
169 i %= ppm->pool_index_max;
170
171 pool = per_cpu_ptr(ppm->pool, cpu);
172 spin_lock_bh(&pool->lock);
173 bitmap_clear(pool->bmap, i, count);
174
175 if (i < pool->next)
176 pool->next = i;
177 spin_unlock_bh(&pool->lock);
178
179 pr_debug("%s: cpu %u, idx %d, next %u.\n",
180 __func__, cpu, i, pool->next);
181 } else {
182 spin_lock_bh(&ppm->map_lock);
183
184 i -= ppm->pool_rsvd;
185 bitmap_clear(ppm->ppod_bmap, i, count);
186
187 if (i < ppm->next)
188 ppm->next = i;
189 spin_unlock_bh(&ppm->map_lock);
190
191 pr_debug("%s: idx %d, next %u.\n", __func__, i, ppm->next);
192 }
193}
194
195void cxgbi_ppm_ppod_release(struct cxgbi_ppm *ppm, u32 idx)
196{
197 struct cxgbi_ppod_data *pdata;
198
199 if (idx >= ppm->ppmax) {
200 pr_warn("ippm: idx too big %u > %u.\n", idx, ppm->ppmax);
201 return;
202 }
203
204 pdata = ppm->ppod_data + idx;
205 if (!pdata->npods) {
206 pr_warn("ippm: idx %u, npods 0.\n", idx);
207 return;
208 }
209
210 pr_debug("release idx %u, npods %u.\n", idx, pdata->npods);
211 ppm_unmark_entries(ppm, idx, pdata->npods);
212}
213EXPORT_SYMBOL(cxgbi_ppm_ppod_release);
214
215int cxgbi_ppm_ppods_reserve(struct cxgbi_ppm *ppm, unsigned short nr_pages,
216 u32 per_tag_pg_idx, u32 *ppod_idx,
217 u32 *ddp_tag, unsigned long caller_data)
218{
219 struct cxgbi_ppod_data *pdata;
220 unsigned int npods;
221 int idx = -1;
222 unsigned int hwidx;
223 u32 tag;
224
225 npods = (nr_pages + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
226 if (!npods) {
227 pr_warn("%s: pages %u -> npods %u, full.\n",
228 __func__, nr_pages, npods);
229 return -EINVAL;
230 }
231
232 /* grab from cpu pool first */
233 idx = ppm_get_cpu_entries(ppm, npods, caller_data);
234 /* try the general pool */
235 if (idx < 0)
236 idx = ppm_get_entries(ppm, npods, caller_data);
237 if (idx < 0) {
238 pr_debug("ippm: pages %u, nospc %u, nxt %u, 0x%lx.\n",
239 nr_pages, npods, ppm->next, caller_data);
240 return idx;
241 }
242
243 pdata = ppm->ppod_data + idx;
244 hwidx = ppm->base_idx + idx;
245
246 tag = cxgbi_ppm_make_ddp_tag(hwidx, pdata->color);
247
248 if (per_tag_pg_idx)
249 tag |= (per_tag_pg_idx << 30) & 0xC0000000;
250
251 *ppod_idx = idx;
252 *ddp_tag = tag;
253
254 pr_debug("ippm: sg %u, tag 0x%x(%u,%u), data 0x%lx.\n",
255 nr_pages, tag, idx, npods, caller_data);
256
257 return npods;
258}
259EXPORT_SYMBOL(cxgbi_ppm_ppods_reserve);
260
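A hedged, caller-side sketch of how the reserve path above pairs with cxgbi_ppm_make_ppod_hdr() and cxgbi_ppm_ppod_release(); the wrapper function and its parameter names are hypothetical:

static int example_ddp_reserve(struct cxgbi_ppm *ppm, unsigned short nr_pages,
			       unsigned int tid, unsigned int offset,
			       unsigned int xferlen, unsigned long task_data,
			       u32 *ppod_idx, u32 *ddp_tag,
			       struct cxgbi_pagepod_hdr *hdr)
{
	int npods = cxgbi_ppm_ppods_reserve(ppm, nr_pages, 0, ppod_idx,
					    ddp_tag, task_data);

	if (npods < 0)
		return npods;	/* no pagepod space: fall back to non-ddp I/O */

	/* build the header that will be written into each pagepod */
	cxgbi_ppm_make_ppod_hdr(ppm, *ddp_tag, tid, offset, xferlen, hdr);

	/* ... program "npods" pagepods into adapter memory, hand *ddp_tag
	 * to the peer as the ITT/TTT base, and call
	 * cxgbi_ppm_ppod_release(ppm, *ppod_idx) when the task completes ...
	 */
	return npods;
}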
261void cxgbi_ppm_make_ppod_hdr(struct cxgbi_ppm *ppm, u32 tag,
262 unsigned int tid, unsigned int offset,
263 unsigned int length,
264 struct cxgbi_pagepod_hdr *hdr)
265{
266 /* The ddp tag stored in the pagepod must have bits 31:30 cleared.
267 * The ddp tag put on the wire to the peer should have non-zero bits 31:30.
268 */
269 tag &= 0x3FFFFFFF;
270
271 hdr->vld_tid = htonl(PPOD_VALID_FLAG | PPOD_TID(tid));
272
273 hdr->rsvd = 0;
274 hdr->pgsz_tag_clr = htonl(tag & ppm->tformat.idx_clr_mask);
275 hdr->max_offset = htonl(length);
276 hdr->page_offset = htonl(offset);
277
278 pr_debug("ippm: tag 0x%x, tid 0x%x, xfer %u, off %u.\n",
279 tag, tid, length, offset);
280}
281EXPORT_SYMBOL(cxgbi_ppm_make_ppod_hdr);
282
283static void ppm_free(struct cxgbi_ppm *ppm)
284{
285 vfree(ppm);
286}
287
288static void ppm_destroy(struct kref *kref)
289{
290 struct cxgbi_ppm *ppm = container_of(kref,
291 struct cxgbi_ppm,
292 refcnt);
293 pr_info("ippm: kref 0, destroy %s ppm 0x%p.\n",
294 ppm->ndev->name, ppm);
295
296 *ppm->ppm_pp = NULL;
297
298 free_percpu(ppm->pool);
299 ppm_free(ppm);
300}
301
302int cxgbi_ppm_release(struct cxgbi_ppm *ppm)
303{
304 if (ppm) {
305 int rv;
306
307 rv = kref_put(&ppm->refcnt, ppm_destroy);
308 return rv;
309 }
310 return 1;
311}
312
313static struct cxgbi_ppm_pool *ppm_alloc_cpu_pool(unsigned int *total,
314 unsigned int *pcpu_ppmax)
315{
316 struct cxgbi_ppm_pool *pools;
317 unsigned int ppmax = (*total) / num_possible_cpus();
318 unsigned int max = (PCPU_MIN_UNIT_SIZE - sizeof(*pools)) << 3;
319 unsigned int bmap;
320 unsigned int alloc_sz;
321 unsigned int count = 0;
322 unsigned int cpu;
323
324 /* make sure per cpu pool fits into PCPU_MIN_UNIT_SIZE */
325 if (ppmax > max)
326 ppmax = max;
327
328 /* pool size must be multiple of unsigned long */
329 bmap = BITS_TO_LONGS(ppmax);
330 ppmax = (bmap * sizeof(unsigned long)) << 3;
331
332 alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap;
333 pools = __alloc_percpu(alloc_sz, __alignof__(struct cxgbi_ppm_pool));
334
335 if (!pools)
336 return NULL;
337
338 for_each_possible_cpu(cpu) {
339 struct cxgbi_ppm_pool *ppool = per_cpu_ptr(pools, cpu);
340
341 memset(ppool, 0, alloc_sz);
342 spin_lock_init(&ppool->lock);
343 count += ppmax;
344 }
345
346 *total = count;
347 *pcpu_ppmax = ppmax;
348
349 return pools;
350}
351
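A rough worked example of the sizing above (assumptions: 64-bit longs, 16 possible CPUs, a PCPU_MIN_UNIT_SIZE of 32 KiB, and a requested total of 65536 pods; none of these values come from this patch):

/*
 *   ppmax per cpu    = 65536 / 16                    = 4096
 *   max              = (32768 - sizeof(*pools)) * 8    (well above 4096)
 *   bmap             = BITS_TO_LONGS(4096)           = 64 longs
 *   ppmax (rounded)  = 64 * 8 bytes * 8 bits         = 4096
 *   alloc_sz per cpu = sizeof(*pools) + 64 * 8       = sizeof(*pools) + 512
 *   *total returned  = 16 * 4096                     = 65536
 */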
352int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev,
353 struct pci_dev *pdev, void *lldev,
354 struct cxgbi_tag_format *tformat,
355 unsigned int ppmax,
356 unsigned int llimit,
357 unsigned int start,
358 unsigned int reserve_factor)
359{
360 struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*ppm_pp);
361 struct cxgbi_ppm_pool *pool = NULL;
362 unsigned int ppmax_pool = 0;
363 unsigned int pool_index_max = 0;
364 unsigned int alloc_sz;
365 unsigned int ppod_bmap_size;
366
367 if (ppm) {
368 pr_info("ippm: %s, ppm 0x%p,0x%p already initialized, %u/%u.\n",
369 ndev->name, ppm_pp, ppm, ppm->ppmax, ppmax);
370 kref_get(&ppm->refcnt);
371 return 1;
372 }
373
374 if (reserve_factor) {
375 ppmax_pool = ppmax / reserve_factor;
376 pool = ppm_alloc_cpu_pool(&ppmax_pool, &pool_index_max);
377
378 pr_debug("%s: ppmax %u, cpu total %u, per cpu %u.\n",
379 ndev->name, ppmax, ppmax_pool, pool_index_max);
380 }
381
382 ppod_bmap_size = BITS_TO_LONGS(ppmax - ppmax_pool);
383 alloc_sz = sizeof(struct cxgbi_ppm) +
384 ppmax * (sizeof(struct cxgbi_ppod_data)) +
385 ppod_bmap_size * sizeof(unsigned long);
386
387 ppm = vmalloc(alloc_sz);
388 if (!ppm)
389 goto release_ppm_pool;
390
391 memset(ppm, 0, alloc_sz);
392
393 ppm->ppod_bmap = (unsigned long *)(&ppm->ppod_data[ppmax]);
394
395 if ((ppod_bmap_size >> 3) > (ppmax - ppmax_pool)) {
396 unsigned int start = ppmax - ppmax_pool;
397 unsigned int end = ppod_bmap_size >> 3;
398
399 bitmap_set(ppm->ppod_bmap, ppmax, end - start);
400 pr_info("%s: %u - %u < %u * 8, mask extra bits %u, %u.\n",
401 __func__, ppmax, ppmax_pool, ppod_bmap_size, start,
402 end);
403 }
404
405 spin_lock_init(&ppm->map_lock);
406 kref_init(&ppm->refcnt);
407
408 memcpy(&ppm->tformat, tformat, sizeof(struct cxgbi_tag_format));
409
410 ppm->ppm_pp = ppm_pp;
411 ppm->ndev = ndev;
412 ppm->pdev = pdev;
413 ppm->lldev = lldev;
414 ppm->ppmax = ppmax;
415 ppm->next = 0;
416 ppm->llimit = llimit;
417 ppm->base_idx = start > llimit ?
418 (start - llimit + 1) >> PPOD_SIZE_SHIFT : 0;
419 ppm->bmap_index_max = ppmax - ppmax_pool;
420
421 ppm->pool = pool;
422 ppm->pool_rsvd = ppmax_pool;
423 ppm->pool_index_max = pool_index_max;
424
425 /* check one more time */
426 if (*ppm_pp) {
427 ppm_free(ppm);
428 ppm = (struct cxgbi_ppm *)(*ppm_pp);
429
430 pr_info("ippm: %s, ppm 0x%p,0x%p already initialized, %u/%u.\n",
431 ndev->name, ppm_pp, *ppm_pp, ppm->ppmax, ppmax);
432
433 kref_get(&ppm->refcnt);
434 return 1;
435 }
436 *ppm_pp = ppm;
437
438 ppm->tformat.pgsz_idx_dflt = cxgbi_ppm_find_page_index(ppm, PAGE_SIZE);
439
440 pr_info("ippm %s: ppm 0x%p, 0x%p, base %u/%u, pg %lu,%u, rsvd %u,%u.\n",
441 ndev->name, ppm_pp, ppm, ppm->base_idx, ppm->ppmax, PAGE_SIZE,
442 ppm->tformat.pgsz_idx_dflt, ppm->pool_rsvd,
443 ppm->pool_index_max);
444
445 return 0;
446
447release_ppm_pool:
448 free_percpu(pool);
449 return -ENOMEM;
450}
451EXPORT_SYMBOL(cxgbi_ppm_init);
452
453unsigned int cxgbi_tagmask_set(unsigned int ppmax)
454{
455 unsigned int bits = fls(ppmax);
456
457 if (bits > PPOD_IDX_MAX_SIZE)
458 bits = PPOD_IDX_MAX_SIZE;
459
460 pr_info("ippm: ppmax %u/0x%x -> bits %u, tagmask 0x%x.\n",
461 ppmax, ppmax, bits, 1 << (bits + PPOD_IDX_SHIFT));
462
463 return 1 << (bits + PPOD_IDX_SHIFT);
464}
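A worked example of the shift arithmetic above (the ppmax value is arbitrary):

/*
 *   ppmax = 8192 (0x2000)  ->  fls(8192) = 14  (<= PPOD_IDX_MAX_SIZE)
 *   tagmask = 1 << (14 + PPOD_IDX_SHIFT) = 1 << (14 + 6) = 0x100000
 */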
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.h
new file mode 100644
index 000000000000..d48732673b75
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.h
@@ -0,0 +1,310 @@
1/*
2 * cxgb4_ppm.h: Chelsio common library for T4/T5 iSCSI ddp operation
3 *
4 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * Written by: Karen Xie (kxie@chelsio.com)
11 */
12
13#ifndef __CXGB4PPM_H__
14#define __CXGB4PPM_H__
15
16#include <linux/kernel.h>
17#include <linux/errno.h>
18#include <linux/types.h>
19#include <linux/debugfs.h>
20#include <linux/list.h>
21#include <linux/netdevice.h>
22#include <linux/scatterlist.h>
23#include <linux/skbuff.h>
24#include <linux/vmalloc.h>
25#include <linux/bitmap.h>
26
27struct cxgbi_pagepod_hdr {
28 u32 vld_tid;
29 u32 pgsz_tag_clr;
30 u32 max_offset;
31 u32 page_offset;
32 u64 rsvd;
33};
34
35#define PPOD_PAGES_MAX 4
36struct cxgbi_pagepod {
37 struct cxgbi_pagepod_hdr hdr;
38 u64 addr[PPOD_PAGES_MAX + 1];
39};
40
41/* ddp tag format
42 * for a 32-bit tag:
43 * bit #
44 * 31 ..... ..... 0
45 * X Y...Y Z...Z, where
46 * ^ ^^^^^ ^^^^
47 * | | |____ when ddp bit = 0: color bits
48 * | |
49 * | |____ when ddp bit = 0: idx into the ddp memory region
50 * |
51 * |____ ddp bit: 0 - ddp tag, 1 - non-ddp tag
52 *
53 * [page selector:2] [sw/free bits] [0] [idx] [color:6]
54 */
55
56#define DDP_PGIDX_MAX 4
57#define DDP_PGSZ_BASE_SHIFT 12 /* base page 4K */
58
59struct cxgbi_task_tag_info {
60 unsigned char flags;
61#define CXGBI_PPOD_INFO_FLAG_VALID 0x1
62#define CXGBI_PPOD_INFO_FLAG_MAPPED 0x2
63 unsigned char cid;
64 unsigned short pg_shift;
65 unsigned int npods;
66 unsigned int idx;
67 unsigned int tag;
68 struct cxgbi_pagepod_hdr hdr;
69 int nents;
70 int nr_pages;
71 struct scatterlist *sgl;
72};
73
74struct cxgbi_tag_format {
75 unsigned char pgsz_order[DDP_PGIDX_MAX];
76 unsigned char pgsz_idx_dflt;
77 unsigned char free_bits:4;
78 unsigned char color_bits:4;
79 unsigned char idx_bits;
80 unsigned char rsvd_bits;
81 unsigned int no_ddp_mask;
82 unsigned int idx_mask;
83 unsigned int color_mask;
84 unsigned int idx_clr_mask;
85 unsigned int rsvd_mask;
86};
87
88struct cxgbi_ppod_data {
89 unsigned char pg_idx:2;
90 unsigned char color:6;
91 unsigned char chan_id;
92 unsigned short npods;
93 unsigned long caller_data;
94};
95
96/* per cpu ppm pool */
97struct cxgbi_ppm_pool {
98 unsigned int base; /* base index */
99 unsigned int next; /* next possible free index */
100 spinlock_t lock; /* ppm pool lock */
101 unsigned long bmap[0];
102} ____cacheline_aligned_in_smp;
103
104struct cxgbi_ppm {
105 struct kref refcnt;
106 struct net_device *ndev; /* net_device, 1st port */
107 struct pci_dev *pdev;
108 void *lldev;
109 void **ppm_pp;
110 struct cxgbi_tag_format tformat;
111 unsigned int ppmax;
112 unsigned int llimit;
113 unsigned int base_idx;
114
115 unsigned int pool_rsvd;
116 unsigned int pool_index_max;
117 struct cxgbi_ppm_pool __percpu *pool;
118 /* map lock */
119 spinlock_t map_lock; /* ppm map lock */
120 unsigned int bmap_index_max;
121 unsigned int next;
122 unsigned long *ppod_bmap;
123 struct cxgbi_ppod_data ppod_data[0];
124};
125
126#define DDP_THRESHOLD 512
127
128#define PPOD_PAGES_SHIFT 2 /* 4 pages per pod */
129
130#define IPPOD_SIZE sizeof(struct cxgbi_pagepod) /* 64 */
131#define PPOD_SIZE_SHIFT 6
132
133/* page pods are allocated in groups of this size (must be power of 2) */
134#define PPOD_CLUSTER_SIZE 16U
135
136#define ULPMEM_DSGL_MAX_NPPODS 16 /* 1024/PPOD_SIZE */
137#define ULPMEM_IDATA_MAX_NPPODS 3 /* (PPOD_SIZE * 3 + ulptx hdr) < 256B */
138#define PCIE_MEMWIN_MAX_NPPODS 16 /* 1024/PPOD_SIZE */
139
140#define PPOD_COLOR_SHIFT 0
141#define PPOD_COLOR(x) ((x) << PPOD_COLOR_SHIFT)
142
143#define PPOD_IDX_SHIFT 6
144#define PPOD_IDX_MAX_SIZE 24
145
146#define PPOD_TID_SHIFT 0
147#define PPOD_TID(x) ((x) << PPOD_TID_SHIFT)
148
149#define PPOD_TAG_SHIFT 6
150#define PPOD_TAG(x) ((x) << PPOD_TAG_SHIFT)
151
152#define PPOD_VALID_SHIFT 24
153#define PPOD_VALID(x) ((x) << PPOD_VALID_SHIFT)
154#define PPOD_VALID_FLAG PPOD_VALID(1U)
155
156#define PPOD_PI_EXTRACT_CTL_SHIFT 31
157#define PPOD_PI_EXTRACT_CTL(x) ((x) << PPOD_PI_EXTRACT_CTL_SHIFT)
158#define PPOD_PI_EXTRACT_CTL_FLAG	PPOD_PI_EXTRACT_CTL(1U)
159
160#define PPOD_PI_TYPE_SHIFT 29
161#define PPOD_PI_TYPE_MASK 0x3
162#define PPOD_PI_TYPE(x) ((x) << PPOD_PI_TYPE_SHIFT)
163
164#define PPOD_PI_CHECK_CTL_SHIFT 27
165#define PPOD_PI_CHECK_CTL_MASK 0x3
166#define PPOD_PI_CHECK_CTL(x) ((x) << PPOD_PI_CHECK_CTL_SHIFT)
167
168#define PPOD_PI_REPORT_CTL_SHIFT 25
169#define PPOD_PI_REPORT_CTL_MASK 0x3
170#define PPOD_PI_REPORT_CTL(x) ((x) << PPOD_PI_REPORT_CTL_SHIFT)
171
172static inline int cxgbi_ppm_is_ddp_tag(struct cxgbi_ppm *ppm, u32 tag)
173{
174 return !(tag & ppm->tformat.no_ddp_mask);
175}
176
177static inline int cxgbi_ppm_sw_tag_is_usable(struct cxgbi_ppm *ppm,
178 u32 tag)
179{
180 /* the sw tag must be using <= 31 bits */
181 return !(tag & 0x80000000U);
182}
183
184static inline int cxgbi_ppm_make_non_ddp_tag(struct cxgbi_ppm *ppm,
185 u32 sw_tag,
186 u32 *final_tag)
187{
188 struct cxgbi_tag_format *tformat = &ppm->tformat;
189
190 if (!cxgbi_ppm_sw_tag_is_usable(ppm, sw_tag)) {
191 pr_info("sw_tag 0x%x NOT usable.\n", sw_tag);
192 return -EINVAL;
193 }
194
195 if (!sw_tag) {
196 *final_tag = tformat->no_ddp_mask;
197 } else {
198 unsigned int shift = tformat->idx_bits + tformat->color_bits;
199 u32 lower = sw_tag & tformat->idx_clr_mask;
200 u32 upper = (sw_tag >> shift) << (shift + 1);
201
202 *final_tag = upper | tformat->no_ddp_mask | lower;
203 }
204 return 0;
205}
206
207static inline u32 cxgbi_ppm_decode_non_ddp_tag(struct cxgbi_ppm *ppm,
208 u32 tag)
209{
210 struct cxgbi_tag_format *tformat = &ppm->tformat;
211 unsigned int shift = tformat->idx_bits + tformat->color_bits;
212 u32 lower = tag & tformat->idx_clr_mask;
213 u32 upper = (tag >> tformat->rsvd_bits) << shift;
214
215 return upper | lower;
216}
217
218static inline u32 cxgbi_ppm_ddp_tag_get_idx(struct cxgbi_ppm *ppm,
219 u32 ddp_tag)
220{
221 u32 hw_idx = (ddp_tag >> PPOD_IDX_SHIFT) &
222 ppm->tformat.idx_mask;
223
224 return hw_idx - ppm->base_idx;
225}
226
227static inline u32 cxgbi_ppm_make_ddp_tag(unsigned int hw_idx,
228 unsigned char color)
229{
230 return (hw_idx << PPOD_IDX_SHIFT) | ((u32)color);
231}
232
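A small worked example of the two helpers above (hw_idx and color are arbitrary, and base_idx is taken to be 0 for simplicity):

/*
 *   cxgbi_ppm_make_ddp_tag(0x123, 0x2a) = (0x123 << 6) | 0x2a = 0x48ea
 *   cxgbi_ppm_ddp_tag_get_idx(ppm, 0x48ea)
 *       = ((0x48ea >> 6) & idx_mask) - base_idx = 0x123 - 0 = 0x123
 */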
233static inline unsigned long
234cxgbi_ppm_get_tag_caller_data(struct cxgbi_ppm *ppm,
235 u32 ddp_tag)
236{
237 u32 idx = cxgbi_ppm_ddp_tag_get_idx(ppm, ddp_tag);
238
239 return ppm->ppod_data[idx].caller_data;
240}
241
242/* sw bits are the free bits */
243static inline int cxgbi_ppm_ddp_tag_update_sw_bits(struct cxgbi_ppm *ppm,
244 u32 val, u32 orig_tag,
245 u32 *final_tag)
246{
247 struct cxgbi_tag_format *tformat = &ppm->tformat;
248 u32 v = val >> tformat->free_bits;
249
250 if (v) {
251 pr_info("sw_bits 0x%x too large, avail bits %u.\n",
252 val, tformat->free_bits);
253 return -EINVAL;
254 }
255 if (!cxgbi_ppm_is_ddp_tag(ppm, orig_tag))
256 return -EINVAL;
257
258 *final_tag = (val << tformat->rsvd_bits) |
259 (orig_tag & ppm->tformat.rsvd_mask);
260 return 0;
261}
262
263static inline void cxgbi_ppm_ppod_clear(struct cxgbi_pagepod *ppod)
264{
265 ppod->hdr.vld_tid = 0U;
266}
267
268static inline void cxgbi_tagmask_check(unsigned int tagmask,
269 struct cxgbi_tag_format *tformat)
270{
271 unsigned int bits = fls(tagmask);
272
273 /* reserve top most 2 bits for page selector */
274 tformat->free_bits = 32 - 2 - bits;
275 tformat->rsvd_bits = bits;
276 tformat->color_bits = PPOD_IDX_SHIFT;
277 tformat->idx_bits = bits - 1 - PPOD_IDX_SHIFT;
278 tformat->no_ddp_mask = 1 << (bits - 1);
279 tformat->idx_mask = (1 << tformat->idx_bits) - 1;
280 tformat->color_mask = (1 << PPOD_IDX_SHIFT) - 1;
281 tformat->idx_clr_mask = (1 << (bits - 1)) - 1;
282 tformat->rsvd_mask = (1 << bits) - 1;
283
284 pr_info("ippm: tagmask 0x%x, rsvd %u=%u+%u+1, mask 0x%x,0x%x, "
285 "pg %u,%u,%u,%u.\n",
286 tagmask, tformat->rsvd_bits, tformat->idx_bits,
287 tformat->color_bits, tformat->no_ddp_mask, tformat->rsvd_mask,
288 tformat->pgsz_order[0], tformat->pgsz_order[1],
289 tformat->pgsz_order[2], tformat->pgsz_order[3]);
290}
291
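Continuing the cxgbi_tagmask_set() example (tagmask 0x100000), the field derivation above works out to the following; every value follows directly from the formulas shown:

/*
 *   fls(0x100000) = 21                   rsvd_bits    = 21
 *   free_bits     = 32 - 2 - 21 = 9      color_bits   = 6
 *   idx_bits      = 21 - 1 - 6  = 14     no_ddp_mask  = 0x100000
 *   idx_mask      = 0x3fff               color_mask   = 0x3f
 *   idx_clr_mask  = 0xfffff              rsvd_mask    = 0x1fffff
 */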
292int cxgbi_ppm_find_page_index(struct cxgbi_ppm *ppm, unsigned long pgsz);
293void cxgbi_ppm_make_ppod_hdr(struct cxgbi_ppm *ppm, u32 tag,
294 unsigned int tid, unsigned int offset,
295 unsigned int length,
296 struct cxgbi_pagepod_hdr *hdr);
297void cxgbi_ppm_ppod_release(struct cxgbi_ppm *, u32 idx);
298int cxgbi_ppm_ppods_reserve(struct cxgbi_ppm *, unsigned short nr_pages,
299 u32 per_tag_pg_idx, u32 *ppod_idx, u32 *ddp_tag,
300 unsigned long caller_data);
301int cxgbi_ppm_init(void **ppm_pp, struct net_device *, struct pci_dev *,
302 void *lldev, struct cxgbi_tag_format *,
303 unsigned int ppmax, unsigned int llimit,
304 unsigned int start,
305 unsigned int reserve_factor);
306int cxgbi_ppm_release(struct cxgbi_ppm *ppm);
307void cxgbi_tagmask_check(unsigned int tagmask, struct cxgbi_tag_format *);
308unsigned int cxgbi_tagmask_set(unsigned int ppmax);
309
310#endif /*__CXGB4PPM_H__*/
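For orientation, a hedged sketch of the expected init/release life cycle from an upper-layer driver; the wrapper and the reserve_factor choice are illustrative, not taken from this patch:

static int example_ppm_attach(void **ppm_pp, struct net_device *ndev,
			      struct pci_dev *pdev, void *lldev,
			      struct cxgbi_tag_format *tformat,
			      unsigned int ppmax, unsigned int llimit,
			      unsigned int start)
{
	/* reserve_factor 4: one quarter of the pods feed the per-cpu pools */
	int rc = cxgbi_ppm_init(ppm_pp, ndev, pdev, lldev, tformat,
				ppmax, llimit, start, 4);

	if (rc < 0)
		return rc;

	/* rc == 1 means an existing ppm was reused and its refcount taken.
	 * Per-task work then goes through cxgbi_ppm_ppods_reserve() and
	 * cxgbi_ppm_ppod_release(); teardown uses cxgbi_ppm_release().
	 */
	return 0;
}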
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index cf711d5f15be..f3c58aaa932d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -191,6 +191,7 @@ static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
191enum cxgb4_uld { 191enum cxgb4_uld {
192 CXGB4_ULD_RDMA, 192 CXGB4_ULD_RDMA,
193 CXGB4_ULD_ISCSI, 193 CXGB4_ULD_ISCSI,
194 CXGB4_ULD_ISCSIT,
194 CXGB4_ULD_MAX 195 CXGB4_ULD_MAX
195}; 196};
196 197
@@ -212,6 +213,7 @@ struct l2t_data;
212struct net_device; 213struct net_device;
213struct pkt_gl; 214struct pkt_gl;
214struct tp_tcp_stats; 215struct tp_tcp_stats;
216struct t4_lro_mgr;
215 217
216struct cxgb4_range { 218struct cxgb4_range {
217 unsigned int start; 219 unsigned int start;
@@ -273,6 +275,10 @@ struct cxgb4_lld_info {
273 unsigned int max_ordird_qp; /* Max ORD/IRD depth per RDMA QP */ 275 unsigned int max_ordird_qp; /* Max ORD/IRD depth per RDMA QP */
274 unsigned int max_ird_adapter; /* Max IRD memory per adapter */ 276 unsigned int max_ird_adapter; /* Max IRD memory per adapter */
275 bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */ 277 bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */
278 unsigned int iscsi_tagmask; /* iscsi ddp tag mask */
279 unsigned int iscsi_pgsz_order; /* iscsi ddp page size orders */
280 unsigned int iscsi_llimit; /* chip's iscsi region llimit */
281 void **iscsi_ppm; /* iscsi page pod manager */
276 int nodeid; /* device numa node id */ 282 int nodeid; /* device numa node id */
277}; 283};
278 284
@@ -283,6 +289,11 @@ struct cxgb4_uld_info {
283 const struct pkt_gl *gl); 289 const struct pkt_gl *gl);
284 int (*state_change)(void *handle, enum cxgb4_state new_state); 290 int (*state_change)(void *handle, enum cxgb4_state new_state);
285 int (*control)(void *handle, enum cxgb4_control control, ...); 291 int (*control)(void *handle, enum cxgb4_control control, ...);
292 int (*lro_rx_handler)(void *handle, const __be64 *rsp,
293 const struct pkt_gl *gl,
294 struct t4_lro_mgr *lro_mgr,
295 struct napi_struct *napi);
296 void (*lro_flush)(struct t4_lro_mgr *);
286}; 297};
287 298
288int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p); 299int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
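The two new hooks give a ULD per-queue LRO aggregation points. A hedged sketch of how an iSCSI-target ULD might wire them up; the handler bodies are placeholders and the remaining cxgb4_uld_info callbacks (rx_handler, state_change, control, ...) are omitted:

static int example_lro_rx(void *handle, const __be64 *rsp,
			  const struct pkt_gl *gl, struct t4_lro_mgr *lro_mgr,
			  struct napi_struct *napi)
{
	/* coalesce or deliver the payload, queueing skbs on lro_mgr->lroq */
	return 0;
}

static void example_lro_flush(struct t4_lro_mgr *lro_mgr)
{
	/* push any held skbs from lro_mgr->lroq up the stack */
}

static struct cxgb4_uld_info example_uld_info = {
	/* ... existing callbacks ... */
	.lro_rx_handler	= example_lro_rx,
	.lro_flush	= example_lro_flush,
};

/* registered with: cxgb4_register_uld(CXGB4_ULD_ISCSIT, &example_uld_info); */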
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 5b0f3ef348e9..60a26037a1c6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -48,8 +48,6 @@
48#include "t4_regs.h" 48#include "t4_regs.h"
49#include "t4_values.h" 49#include "t4_values.h"
50 50
51#define VLAN_NONE 0xfff
52
53/* identifies sync vs async L2T_WRITE_REQs */ 51/* identifies sync vs async L2T_WRITE_REQs */
54#define SYNC_WR_S 12 52#define SYNC_WR_S 12
55#define SYNC_WR_V(x) ((x) << SYNC_WR_S) 53#define SYNC_WR_V(x) ((x) << SYNC_WR_S)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
index 4e2d47ac102b..79665bd8f881 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
@@ -39,6 +39,8 @@
39#include <linux/if_ether.h> 39#include <linux/if_ether.h>
40#include <linux/atomic.h> 40#include <linux/atomic.h>
41 41
42#define VLAN_NONE 0xfff
43
42enum { L2T_SIZE = 4096 }; /* # of L2T entries */ 44enum { L2T_SIZE = 4096 }; /* # of L2T entries */
43 45
44enum { 46enum {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index deca4a2956cc..13b144bcf725 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2157,8 +2157,11 @@ static int process_responses(struct sge_rspq *q, int budget)
2157 2157
2158 while (likely(budget_left)) { 2158 while (likely(budget_left)) {
2159 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); 2159 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
2160 if (!is_new_response(rc, q)) 2160 if (!is_new_response(rc, q)) {
2161 if (q->flush_handler)
2162 q->flush_handler(q);
2161 break; 2163 break;
2164 }
2162 2165
2163 dma_rmb(); 2166 dma_rmb();
2164 rsp_type = RSPD_TYPE_G(rc->type_gen); 2167 rsp_type = RSPD_TYPE_G(rc->type_gen);
@@ -2544,7 +2547,8 @@ static void __iomem *bar2_address(struct adapter *adapter,
2544 */ 2547 */
2545int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, 2548int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
2546 struct net_device *dev, int intr_idx, 2549 struct net_device *dev, int intr_idx,
2547 struct sge_fl *fl, rspq_handler_t hnd, int cong) 2550 struct sge_fl *fl, rspq_handler_t hnd,
2551 rspq_flush_handler_t flush_hnd, int cong)
2548{ 2552{
2549 int ret, flsz = 0; 2553 int ret, flsz = 0;
2550 struct fw_iq_cmd c; 2554 struct fw_iq_cmd c;
@@ -2648,6 +2652,10 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
2648 iq->size--; /* subtract status entry */ 2652 iq->size--; /* subtract status entry */
2649 iq->netdev = dev; 2653 iq->netdev = dev;
2650 iq->handler = hnd; 2654 iq->handler = hnd;
2655 iq->flush_handler = flush_hnd;
2656
2657 memset(&iq->lro_mgr, 0, sizeof(struct t4_lro_mgr));
2658 skb_queue_head_init(&iq->lro_mgr.lroq);
2651 2659
2652 /* set offset to -1 to distinguish ingress queues without FL */ 2660 /* set offset to -1 to distinguish ingress queues without FL */
2653 iq->offset = fl ? 0 : -1; 2661 iq->offset = fl ? 0 : -1;
@@ -2992,6 +3000,7 @@ void t4_free_sge_resources(struct adapter *adap)
2992 3000
2993 /* clean up RDMA and iSCSI Rx queues */ 3001 /* clean up RDMA and iSCSI Rx queues */
2994 t4_free_ofld_rxqs(adap, adap->sge.iscsiqsets, adap->sge.iscsirxq); 3002 t4_free_ofld_rxqs(adap, adap->sge.iscsiqsets, adap->sge.iscsirxq);
3003 t4_free_ofld_rxqs(adap, adap->sge.niscsitq, adap->sge.iscsitrxq);
2995 t4_free_ofld_rxqs(adap, adap->sge.rdmaqs, adap->sge.rdmarxq); 3004 t4_free_ofld_rxqs(adap, adap->sge.rdmaqs, adap->sge.rdmarxq);
2996 t4_free_ofld_rxqs(adap, adap->sge.rdmaciqs, adap->sge.rdmaciq); 3005 t4_free_ofld_rxqs(adap, adap->sge.rdmaciqs, adap->sge.rdmaciq);
2997 3006
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 1d2d1da40c80..80417fc564d4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -51,6 +51,7 @@ enum {
51 CPL_TX_PKT = 0xE, 51 CPL_TX_PKT = 0xE,
52 CPL_L2T_WRITE_REQ = 0x12, 52 CPL_L2T_WRITE_REQ = 0x12,
53 CPL_TID_RELEASE = 0x1A, 53 CPL_TID_RELEASE = 0x1A,
54 CPL_TX_DATA_ISO = 0x1F,
54 55
55 CPL_CLOSE_LISTSRV_RPL = 0x20, 56 CPL_CLOSE_LISTSRV_RPL = 0x20,
56 CPL_L2T_WRITE_RPL = 0x23, 57 CPL_L2T_WRITE_RPL = 0x23,
@@ -344,6 +345,87 @@ struct cpl_pass_open_rpl {
344 u8 status; 345 u8 status;
345}; 346};
346 347
348struct tcp_options {
349 __be16 mss;
350 __u8 wsf;
351#if defined(__LITTLE_ENDIAN_BITFIELD)
352 __u8:4;
353 __u8 unknown:1;
354 __u8:1;
355 __u8 sack:1;
356 __u8 tstamp:1;
357#else
358 __u8 tstamp:1;
359 __u8 sack:1;
360 __u8:1;
361 __u8 unknown:1;
362 __u8:4;
363#endif
364};
365
366struct cpl_pass_accept_req {
367 union opcode_tid ot;
368 __be16 rsvd;
369 __be16 len;
370 __be32 hdr_len;
371 __be16 vlan;
372 __be16 l2info;
373 __be32 tos_stid;
374 struct tcp_options tcpopt;
375};
376
377/* cpl_pass_accept_req.hdr_len fields */
378#define SYN_RX_CHAN_S 0
379#define SYN_RX_CHAN_M 0xF
380#define SYN_RX_CHAN_V(x) ((x) << SYN_RX_CHAN_S)
381#define SYN_RX_CHAN_G(x) (((x) >> SYN_RX_CHAN_S) & SYN_RX_CHAN_M)
382
383#define TCP_HDR_LEN_S 10
384#define TCP_HDR_LEN_M 0x3F
385#define TCP_HDR_LEN_V(x) ((x) << TCP_HDR_LEN_S)
386#define TCP_HDR_LEN_G(x) (((x) >> TCP_HDR_LEN_S) & TCP_HDR_LEN_M)
387
388#define IP_HDR_LEN_S 16
389#define IP_HDR_LEN_M 0x3FF
390#define IP_HDR_LEN_V(x) ((x) << IP_HDR_LEN_S)
391#define IP_HDR_LEN_G(x) (((x) >> IP_HDR_LEN_S) & IP_HDR_LEN_M)
392
393#define ETH_HDR_LEN_S 26
394#define ETH_HDR_LEN_M 0x1F
395#define ETH_HDR_LEN_V(x) ((x) << ETH_HDR_LEN_S)
396#define ETH_HDR_LEN_G(x) (((x) >> ETH_HDR_LEN_S) & ETH_HDR_LEN_M)
397
398/* cpl_pass_accept_req.l2info fields */
399#define SYN_MAC_IDX_S 0
400#define SYN_MAC_IDX_M 0x1FF
401#define SYN_MAC_IDX_V(x) ((x) << SYN_MAC_IDX_S)
402#define SYN_MAC_IDX_G(x) (((x) >> SYN_MAC_IDX_S) & SYN_MAC_IDX_M)
403
404#define SYN_XACT_MATCH_S 9
405#define SYN_XACT_MATCH_V(x) ((x) << SYN_XACT_MATCH_S)
406#define SYN_XACT_MATCH_F SYN_XACT_MATCH_V(1U)
407
408#define SYN_INTF_S 12
409#define SYN_INTF_M 0xF
410#define SYN_INTF_V(x) ((x) << SYN_INTF_S)
411#define SYN_INTF_G(x) (((x) >> SYN_INTF_S) & SYN_INTF_M)
412
413enum { /* TCP congestion control algorithms */
414 CONG_ALG_RENO,
415 CONG_ALG_TAHOE,
416 CONG_ALG_NEWRENO,
417 CONG_ALG_HIGHSPEED
418};
419
420#define CONG_CNTRL_S 14
421#define CONG_CNTRL_M 0x3
422#define CONG_CNTRL_V(x) ((x) << CONG_CNTRL_S)
423#define CONG_CNTRL_G(x) (((x) >> CONG_CNTRL_S) & CONG_CNTRL_M)
424
425#define T5_ISS_S 18
426#define T5_ISS_V(x) ((x) << T5_ISS_S)
427#define T5_ISS_F T5_ISS_V(1U)
428
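A hedged example of pulling the individual header lengths and the ingress interface out of a received cpl_pass_accept_req, as a passive-open offload driver would; the helper itself is illustrative:

static void example_parse_pass_accept_req(const struct cpl_pass_accept_req *req)
{
	u32 hlen   = be32_to_cpu(req->hdr_len);
	u16 l2info = be16_to_cpu(req->l2info);

	unsigned int eth_len = ETH_HDR_LEN_G(hlen);
	unsigned int ip_len  = IP_HDR_LEN_G(hlen);
	unsigned int tcp_len = TCP_HDR_LEN_G(hlen);
	unsigned int rx_chan = SYN_RX_CHAN_G(hlen);
	unsigned int intf    = SYN_INTF_G(l2info);

	pr_debug("eth %u, ip %u, tcp %u, chan %u, intf %u\n",
		 eth_len, ip_len, tcp_len, rx_chan, intf);
}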
347struct cpl_pass_accept_rpl { 429struct cpl_pass_accept_rpl {
348 WR_HDR; 430 WR_HDR;
349 union opcode_tid ot; 431 union opcode_tid ot;
@@ -818,6 +900,110 @@ struct cpl_iscsi_hdr {
818#define ISCSI_DDP_V(x) ((x) << ISCSI_DDP_S) 900#define ISCSI_DDP_V(x) ((x) << ISCSI_DDP_S)
819#define ISCSI_DDP_F ISCSI_DDP_V(1U) 901#define ISCSI_DDP_F ISCSI_DDP_V(1U)
820 902
903struct cpl_rx_data_ddp {
904 union opcode_tid ot;
905 __be16 urg;
906 __be16 len;
907 __be32 seq;
908 union {
909 __be32 nxt_seq;
910 __be32 ddp_report;
911 };
912 __be32 ulp_crc;
913 __be32 ddpvld;
914};
915
916#define cpl_rx_iscsi_ddp cpl_rx_data_ddp
917
918struct cpl_iscsi_data {
919 union opcode_tid ot;
920 __u8 rsvd0[2];
921 __be16 len;
922 __be32 seq;
923 __be16 urg;
924 __u8 rsvd1;
925 __u8 status;
926};
927
928struct cpl_tx_data_iso {
929 __be32 op_to_scsi;
930 __u8 reserved1;
931 __u8 ahs_len;
932 __be16 mpdu;
933 __be32 burst_size;
934 __be32 len;
935 __be32 reserved2_seglen_offset;
936 __be32 datasn_offset;
937 __be32 buffer_offset;
938 __be32 reserved3;
939
940 /* encapsulated CPL_TX_DATA follows here */
941};
942
943/* cpl_tx_data_iso.op_to_scsi fields */
944#define CPL_TX_DATA_ISO_OP_S 24
945#define CPL_TX_DATA_ISO_OP_M 0xff
946#define CPL_TX_DATA_ISO_OP_V(x) ((x) << CPL_TX_DATA_ISO_OP_S)
947#define CPL_TX_DATA_ISO_OP_G(x) \
948 (((x) >> CPL_TX_DATA_ISO_OP_S) & CPL_TX_DATA_ISO_OP_M)
949
950#define CPL_TX_DATA_ISO_FIRST_S 23
951#define CPL_TX_DATA_ISO_FIRST_M 0x1
952#define CPL_TX_DATA_ISO_FIRST_V(x) ((x) << CPL_TX_DATA_ISO_FIRST_S)
953#define CPL_TX_DATA_ISO_FIRST_G(x) \
954 (((x) >> CPL_TX_DATA_ISO_FIRST_S) & CPL_TX_DATA_ISO_FIRST_M)
955#define CPL_TX_DATA_ISO_FIRST_F CPL_TX_DATA_ISO_FIRST_V(1U)
956
957#define CPL_TX_DATA_ISO_LAST_S 22
958#define CPL_TX_DATA_ISO_LAST_M 0x1
959#define CPL_TX_DATA_ISO_LAST_V(x) ((x) << CPL_TX_DATA_ISO_LAST_S)
960#define CPL_TX_DATA_ISO_LAST_G(x) \
961 (((x) >> CPL_TX_DATA_ISO_LAST_S) & CPL_TX_DATA_ISO_LAST_M)
962#define CPL_TX_DATA_ISO_LAST_F CPL_TX_DATA_ISO_LAST_V(1U)
963
964#define CPL_TX_DATA_ISO_CPLHDRLEN_S 21
965#define CPL_TX_DATA_ISO_CPLHDRLEN_M 0x1
966#define CPL_TX_DATA_ISO_CPLHDRLEN_V(x) ((x) << CPL_TX_DATA_ISO_CPLHDRLEN_S)
967#define CPL_TX_DATA_ISO_CPLHDRLEN_G(x) \
968 (((x) >> CPL_TX_DATA_ISO_CPLHDRLEN_S) & CPL_TX_DATA_ISO_CPLHDRLEN_M)
969#define CPL_TX_DATA_ISO_CPLHDRLEN_F CPL_TX_DATA_ISO_CPLHDRLEN_V(1U)
970
971#define CPL_TX_DATA_ISO_HDRCRC_S 20
972#define CPL_TX_DATA_ISO_HDRCRC_M 0x1
973#define CPL_TX_DATA_ISO_HDRCRC_V(x) ((x) << CPL_TX_DATA_ISO_HDRCRC_S)
974#define CPL_TX_DATA_ISO_HDRCRC_G(x) \
975 (((x) >> CPL_TX_DATA_ISO_HDRCRC_S) & CPL_TX_DATA_ISO_HDRCRC_M)
976#define CPL_TX_DATA_ISO_HDRCRC_F CPL_TX_DATA_ISO_HDRCRC_V(1U)
977
978#define CPL_TX_DATA_ISO_PLDCRC_S 19
979#define CPL_TX_DATA_ISO_PLDCRC_M 0x1
980#define CPL_TX_DATA_ISO_PLDCRC_V(x) ((x) << CPL_TX_DATA_ISO_PLDCRC_S)
981#define CPL_TX_DATA_ISO_PLDCRC_G(x) \
982 (((x) >> CPL_TX_DATA_ISO_PLDCRC_S) & CPL_TX_DATA_ISO_PLDCRC_M)
983#define CPL_TX_DATA_ISO_PLDCRC_F CPL_TX_DATA_ISO_PLDCRC_V(1U)
984
985#define CPL_TX_DATA_ISO_IMMEDIATE_S 18
986#define CPL_TX_DATA_ISO_IMMEDIATE_M 0x1
987#define CPL_TX_DATA_ISO_IMMEDIATE_V(x) ((x) << CPL_TX_DATA_ISO_IMMEDIATE_S)
988#define CPL_TX_DATA_ISO_IMMEDIATE_G(x) \
989 (((x) >> CPL_TX_DATA_ISO_IMMEDIATE_S) & CPL_TX_DATA_ISO_IMMEDIATE_M)
990#define CPL_TX_DATA_ISO_IMMEDIATE_F CPL_TX_DATA_ISO_IMMEDIATE_V(1U)
991
992#define CPL_TX_DATA_ISO_SCSI_S 16
993#define CPL_TX_DATA_ISO_SCSI_M 0x3
994#define CPL_TX_DATA_ISO_SCSI_V(x) ((x) << CPL_TX_DATA_ISO_SCSI_S)
995#define CPL_TX_DATA_ISO_SCSI_G(x) \
996 (((x) >> CPL_TX_DATA_ISO_SCSI_S) & CPL_TX_DATA_ISO_SCSI_M)
997
998/* cpl_tx_data_iso.reserved2_seglen_offset fields */
999#define CPL_TX_DATA_ISO_SEGLEN_OFFSET_S 0
1000#define CPL_TX_DATA_ISO_SEGLEN_OFFSET_M 0xffffff
1001#define CPL_TX_DATA_ISO_SEGLEN_OFFSET_V(x) \
1002 ((x) << CPL_TX_DATA_ISO_SEGLEN_OFFSET_S)
1003#define CPL_TX_DATA_ISO_SEGLEN_OFFSET_G(x) \
1004 (((x) >> CPL_TX_DATA_ISO_SEGLEN_OFFSET_S) & \
1005 CPL_TX_DATA_ISO_SEGLEN_OFFSET_M)
1006
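A hedged example of how the op_to_scsi word of a TX ISO CPL might be assembled for a single complete burst (first and last set, CRC on both header and payload); field values other than the opcode are purely illustrative:

static void example_setup_tx_data_iso(struct cpl_tx_data_iso *cpl)
{
	cpl->op_to_scsi = cpu_to_be32(CPL_TX_DATA_ISO_OP_V(CPL_TX_DATA_ISO) |
				      CPL_TX_DATA_ISO_FIRST_V(1) |
				      CPL_TX_DATA_ISO_LAST_V(1) |
				      CPL_TX_DATA_ISO_CPLHDRLEN_V(0) |
				      CPL_TX_DATA_ISO_HDRCRC_V(1) |
				      CPL_TX_DATA_ISO_PLDCRC_V(1) |
				      CPL_TX_DATA_ISO_IMMEDIATE_V(0) |
				      CPL_TX_DATA_ISO_SCSI_V(2));
}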
821struct cpl_rx_data { 1007struct cpl_rx_data {
822 union opcode_tid ot; 1008 union opcode_tid ot;
823 __be16 rsvd; 1009 __be16 rsvd;
@@ -854,6 +1040,15 @@ struct cpl_rx_data_ack {
854#define RX_FORCE_ACK_V(x) ((x) << RX_FORCE_ACK_S) 1040#define RX_FORCE_ACK_V(x) ((x) << RX_FORCE_ACK_S)
855#define RX_FORCE_ACK_F RX_FORCE_ACK_V(1U) 1041#define RX_FORCE_ACK_F RX_FORCE_ACK_V(1U)
856 1042
1043#define RX_DACK_MODE_S 29
1044#define RX_DACK_MODE_M 0x3
1045#define RX_DACK_MODE_V(x) ((x) << RX_DACK_MODE_S)
1046#define RX_DACK_MODE_G(x) (((x) >> RX_DACK_MODE_S) & RX_DACK_MODE_M)
1047
1048#define RX_DACK_CHANGE_S 31
1049#define RX_DACK_CHANGE_V(x) ((x) << RX_DACK_CHANGE_S)
1050#define RX_DACK_CHANGE_F RX_DACK_CHANGE_V(1U)
1051
857struct cpl_rx_pkt { 1052struct cpl_rx_pkt {
858 struct rss_header rsshdr; 1053 struct rss_header rsshdr;
859 u8 opcode; 1054 u8 opcode;
@@ -1090,6 +1285,12 @@ struct cpl_fw4_ack {
1090 __be64 rsvd1; 1285 __be64 rsvd1;
1091}; 1286};
1092 1287
1288enum {
1289 CPL_FW4_ACK_FLAGS_SEQVAL = 0x1, /* seqn valid */
1290 CPL_FW4_ACK_FLAGS_CH = 0x2, /* channel change complete */
1291 CPL_FW4_ACK_FLAGS_FLOWC = 0x4, /* fw_flowc_wr complete */
1292};
1293
1093struct cpl_fw6_msg { 1294struct cpl_fw6_msg {
1094 u8 opcode; 1295 u8 opcode;
1095 u8 type; 1296 u8 type;
@@ -1115,6 +1316,17 @@ struct cpl_fw6_msg_ofld_connection_wr_rpl {
1115 __u8 rsvd[2]; 1316 __u8 rsvd[2];
1116}; 1317};
1117 1318
1319struct cpl_tx_data {
1320 union opcode_tid ot;
1321 __be32 len;
1322 __be32 rsvd;
1323 __be32 flags;
1324};
1325
1326/* cpl_tx_data.flags field */
1327#define TX_FORCE_S 13
1328#define TX_FORCE_V(x) ((x) << TX_FORCE_S)
1329
1118enum { 1330enum {
1119 ULP_TX_MEM_READ = 2, 1331 ULP_TX_MEM_READ = 2,
1120 ULP_TX_MEM_WRITE = 3, 1332 ULP_TX_MEM_WRITE = 3,
@@ -1143,6 +1355,11 @@ struct ulptx_sgl {
1143 struct ulptx_sge_pair sge[0]; 1355 struct ulptx_sge_pair sge[0];
1144}; 1356};
1145 1357
1358struct ulptx_idata {
1359 __be32 cmd_more;
1360 __be32 len;
1361};
1362
1146#define ULPTX_NSGE_S 0 1363#define ULPTX_NSGE_S 0
1147#define ULPTX_NSGE_V(x) ((x) << ULPTX_NSGE_S) 1364#define ULPTX_NSGE_V(x) ((x) << ULPTX_NSGE_S)
1148 1365
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 06bc2d2e7a73..a2cdfc1261dc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -166,6 +166,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
166 CH_PCI_ID_TABLE_FENTRY(0x5099), /* Custom 2x40G QSFP */ 166 CH_PCI_ID_TABLE_FENTRY(0x5099), /* Custom 2x40G QSFP */
167 CH_PCI_ID_TABLE_FENTRY(0x509a), /* Custom T520-CR */ 167 CH_PCI_ID_TABLE_FENTRY(0x509a), /* Custom T520-CR */
168 CH_PCI_ID_TABLE_FENTRY(0x509b), /* Custom T540-CR LOM */ 168 CH_PCI_ID_TABLE_FENTRY(0x509b), /* Custom T540-CR LOM */
169 CH_PCI_ID_TABLE_FENTRY(0x509c), /* Custom T520-CR*/
169 170
170 /* T6 adapters: 171 /* T6 adapters:
171 */ 172 */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index c8661c77b4e3..7ad6d4e75b2a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -101,6 +101,7 @@ enum fw_wr_opcodes {
101 FW_RI_BIND_MW_WR = 0x18, 101 FW_RI_BIND_MW_WR = 0x18,
102 FW_RI_FR_NSMR_WR = 0x19, 102 FW_RI_FR_NSMR_WR = 0x19,
103 FW_RI_INV_LSTAG_WR = 0x1a, 103 FW_RI_INV_LSTAG_WR = 0x1a,
104 FW_ISCSI_TX_DATA_WR = 0x45,
104 FW_LASTC2E_WR = 0x70 105 FW_LASTC2E_WR = 0x70
105}; 106};
106 107
@@ -561,7 +562,12 @@ enum fw_flowc_mnem {
561 FW_FLOWC_MNEM_SNDBUF, 562 FW_FLOWC_MNEM_SNDBUF,
562 FW_FLOWC_MNEM_MSS, 563 FW_FLOWC_MNEM_MSS,
563 FW_FLOWC_MNEM_TXDATAPLEN_MAX, 564 FW_FLOWC_MNEM_TXDATAPLEN_MAX,
564 FW_FLOWC_MNEM_SCHEDCLASS = 11, 565 FW_FLOWC_MNEM_TCPSTATE,
566 FW_FLOWC_MNEM_EOSTATE,
567 FW_FLOWC_MNEM_SCHEDCLASS,
568 FW_FLOWC_MNEM_DCBPRIO,
569 FW_FLOWC_MNEM_SND_SCALE,
570 FW_FLOWC_MNEM_RCV_SCALE,
565}; 571};
566 572
567struct fw_flowc_mnemval { 573struct fw_flowc_mnemval {
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 1cc8a7a69457..730fec73d5a6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -74,7 +74,8 @@ static int dflt_msg_enable = DFLT_MSG_ENABLE;
74 74
75module_param(dflt_msg_enable, int, 0644); 75module_param(dflt_msg_enable, int, 0644);
76MODULE_PARM_DESC(dflt_msg_enable, 76MODULE_PARM_DESC(dflt_msg_enable,
77 "default adapter ethtool message level bitmap"); 77 "default adapter ethtool message level bitmap, "
78 "deprecated parameter");
78 79
79/* 80/*
80 * The driver uses the best interrupt scheme available on a platform in the 81 * The driver uses the best interrupt scheme available on a platform in the
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 37c081583084..08243c2ff4b4 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -943,8 +943,8 @@ fec_restart(struct net_device *ndev)
943 else 943 else
944 val &= ~FEC_RACC_OPTIONS; 944 val &= ~FEC_RACC_OPTIONS;
945 writel(val, fep->hwp + FEC_RACC); 945 writel(val, fep->hwp + FEC_RACC);
946 writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
946 } 947 }
947 writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
948#endif 948#endif
949 949
950 /* 950 /*
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index 79a210aaf0bb..ea83712a6d62 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -35,6 +35,7 @@
35#include "fman.h" 35#include "fman.h"
36#include "fman_muram.h" 36#include "fman_muram.h"
37 37
38#include <linux/fsl/guts.h>
38#include <linux/slab.h> 39#include <linux/slab.h>
39#include <linux/delay.h> 40#include <linux/delay.h>
40#include <linux/module.h> 41#include <linux/module.h>
@@ -1871,6 +1872,90 @@ err_fm_state:
1871 return -EINVAL; 1872 return -EINVAL;
1872} 1873}
1873 1874
1875static int fman_reset(struct fman *fman)
1876{
1877 u32 count;
1878 int err = 0;
1879
1880 if (fman->state->rev_info.major < 6) {
1881 iowrite32be(FPM_RSTC_FM_RESET, &fman->fpm_regs->fm_rstc);
1882 /* Wait for reset completion */
1883 count = 100;
1884 do {
1885 udelay(1);
1886 } while (((ioread32be(&fman->fpm_regs->fm_rstc)) &
1887 FPM_RSTC_FM_RESET) && --count);
1888 if (count == 0)
1889 err = -EBUSY;
1890
1891 goto _return;
1892 } else {
1893 struct device_node *guts_node;
1894 struct ccsr_guts __iomem *guts_regs;
1895 u32 devdisr2, reg;
1896
1897 /* Errata A007273 */
1898 guts_node =
1899 of_find_compatible_node(NULL, NULL,
1900 "fsl,qoriq-device-config-2.0");
1901 if (!guts_node) {
1902 dev_err(fman->dev, "%s: Couldn't find guts node\n",
1903 __func__);
1904 goto guts_node;
1905 }
1906
1907 guts_regs = of_iomap(guts_node, 0);
1908 if (!guts_regs) {
1909 dev_err(fman->dev, "%s: Couldn't map %s regs\n",
1910 __func__, guts_node->full_name);
1911 goto guts_regs;
1912 }
1913#define FMAN1_ALL_MACS_MASK 0xFCC00000
1914#define FMAN2_ALL_MACS_MASK 0x000FCC00
1915 /* Read current state */
1916 devdisr2 = ioread32be(&guts_regs->devdisr2);
1917 if (fman->dts_params.id == 0)
1918 reg = devdisr2 & ~FMAN1_ALL_MACS_MASK;
1919 else
1920 reg = devdisr2 & ~FMAN2_ALL_MACS_MASK;
1921
1922 /* Enable all MACs */
1923 iowrite32be(reg, &guts_regs->devdisr2);
1924
1925 /* Perform FMan reset */
1926 iowrite32be(FPM_RSTC_FM_RESET, &fman->fpm_regs->fm_rstc);
1927
1928 /* Wait for reset completion */
1929 count = 100;
1930 do {
1931 udelay(1);
1932 } while (((ioread32be(&fman->fpm_regs->fm_rstc)) &
1933 FPM_RSTC_FM_RESET) && --count);
1934 if (count == 0) {
1935 iounmap(guts_regs);
1936 of_node_put(guts_node);
1937 err = -EBUSY;
1938 goto _return;
1939 }
1940
1941 /* Restore devdisr2 value */
1942 iowrite32be(devdisr2, &guts_regs->devdisr2);
1943
1944 iounmap(guts_regs);
1945 of_node_put(guts_node);
1946
1947 goto _return;
1948
1949guts_regs:
1950 of_node_put(guts_node);
1951guts_node:
1952 dev_dbg(fman->dev, "%s: Didn't perform FManV3 reset due to Errata A007273!\n",
1953 __func__);
1954 }
1955_return:
1956 return err;
1957}
1958
1874static int fman_init(struct fman *fman) 1959static int fman_init(struct fman *fman)
1875{ 1960{
1876 struct fman_cfg *cfg = NULL; 1961 struct fman_cfg *cfg = NULL;
@@ -1914,22 +1999,9 @@ static int fman_init(struct fman *fman)
1914 fman->liodn_base[i] = liodn_base; 1999 fman->liodn_base[i] = liodn_base;
1915 } 2000 }
1916 2001
1917 /* FMan Reset (supported only for FMan V2) */ 2002 err = fman_reset(fman);
1918 if (fman->state->rev_info.major >= 6) { 2003 if (err)
1919 /* Errata A007273 */ 2004 return err;
1920 dev_dbg(fman->dev, "%s: FManV3 reset is not supported!\n",
1921 __func__);
1922 } else {
1923 iowrite32be(FPM_RSTC_FM_RESET, &fman->fpm_regs->fm_rstc);
1924 /* Wait for reset completion */
1925 count = 100;
1926 do {
1927 udelay(1);
1928 } while (((ioread32be(&fman->fpm_regs->fm_rstc)) &
1929 FPM_RSTC_FM_RESET) && --count);
1930 if (count == 0)
1931 return -EBUSY;
1932 }
1933 2005
1934 if (ioread32be(&fman->qmi_regs->fmqm_gs) & QMI_GS_HALT_NOT_BUSY) { 2006 if (ioread32be(&fman->qmi_regs->fmqm_gs) & QMI_GS_HALT_NOT_BUSY) {
1935 resume(fman->fpm_regs); 2007 resume(fman->fpm_regs);
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index 1cbcb9fa3fb5..e8d36aaea223 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -147,6 +147,8 @@ enum hnae_led_state {
147 147
148#define HNSV2_TXD_BUFNUM_S 0 148#define HNSV2_TXD_BUFNUM_S 0
149#define HNSV2_TXD_BUFNUM_M (0x7 << HNSV2_TXD_BUFNUM_S) 149#define HNSV2_TXD_BUFNUM_M (0x7 << HNSV2_TXD_BUFNUM_S)
150#define HNSV2_TXD_PORTID_S 4
151#define HNSV2_TXD_PORTID_M (0X7 << HNSV2_TXD_PORTID_S)
150#define HNSV2_TXD_RI_B 1 152#define HNSV2_TXD_RI_B 1
151#define HNSV2_TXD_L4CS_B 2 153#define HNSV2_TXD_L4CS_B 2
152#define HNSV2_TXD_L3CS_B 3 154#define HNSV2_TXD_L3CS_B 3
@@ -467,7 +469,7 @@ struct hnae_ae_ops {
467 u32 *tx_usecs, u32 *rx_usecs); 469 u32 *tx_usecs, u32 *rx_usecs);
468 void (*get_rx_max_coalesced_frames)(struct hnae_handle *handle, 470 void (*get_rx_max_coalesced_frames)(struct hnae_handle *handle,
469 u32 *tx_frames, u32 *rx_frames); 471 u32 *tx_frames, u32 *rx_frames);
470 void (*set_coalesce_usecs)(struct hnae_handle *handle, u32 timeout); 472 int (*set_coalesce_usecs)(struct hnae_handle *handle, u32 timeout);
471 int (*set_coalesce_frames)(struct hnae_handle *handle, 473 int (*set_coalesce_frames)(struct hnae_handle *handle,
472 u32 coalesce_frames); 474 u32 coalesce_frames);
473 void (*set_promisc_mode)(struct hnae_handle *handle, u32 en); 475 void (*set_promisc_mode)(struct hnae_handle *handle, u32 en);
@@ -516,6 +518,7 @@ struct hnae_handle {
516 int q_num; 518 int q_num;
517 int vf_id; 519 int vf_id;
518 u32 eport_id; 520 u32 eport_id;
521 u32 dport_id; /* v2 tx bd should fill the dport_id */
519 enum hnae_port_type port_type; 522 enum hnae_port_type port_type;
520 struct list_head node; /* list to hnae_ae_dev->handle_list */ 523 struct list_head node; /* list to hnae_ae_dev->handle_list */
521 struct hnae_buf_ops *bops; /* operation for the buffer */ 524 struct hnae_buf_ops *bops; /* operation for the buffer */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index d4f92ed322d6..159142272afb 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -159,11 +159,6 @@ struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
159 ae_handle->qs[i]->tx_ring.q = ae_handle->qs[i]; 159 ae_handle->qs[i]->tx_ring.q = ae_handle->qs[i];
160 160
161 ring_pair_cb->used_by_vf = 1; 161 ring_pair_cb->used_by_vf = 1;
162 if (port_idx < DSAF_SERVICE_PORT_NUM_PER_DSAF)
163 ring_pair_cb->port_id_in_dsa = port_idx;
164 else
165 ring_pair_cb->port_id_in_dsa = 0;
166
167 ring_pair_cb++; 162 ring_pair_cb++;
168 } 163 }
169 164
@@ -175,6 +170,7 @@ struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
175 ae_handle->phy_node = vf_cb->mac_cb->phy_node; 170 ae_handle->phy_node = vf_cb->mac_cb->phy_node;
176 ae_handle->if_support = vf_cb->mac_cb->if_support; 171 ae_handle->if_support = vf_cb->mac_cb->if_support;
177 ae_handle->port_type = vf_cb->mac_cb->mac_type; 172 ae_handle->port_type = vf_cb->mac_cb->mac_type;
173 ae_handle->dport_id = port_idx;
178 174
179 return ae_handle; 175 return ae_handle;
180vf_id_err: 176vf_id_err:
@@ -403,11 +399,16 @@ static void hns_ae_get_ring_bdnum_limit(struct hnae_queue *queue,
403static void hns_ae_get_pauseparam(struct hnae_handle *handle, 399static void hns_ae_get_pauseparam(struct hnae_handle *handle,
404 u32 *auto_neg, u32 *rx_en, u32 *tx_en) 400 u32 *auto_neg, u32 *rx_en, u32 *tx_en)
405{ 401{
406 assert(handle); 402 struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
403 struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
407 404
408 hns_mac_get_autoneg(hns_get_mac_cb(handle), auto_neg); 405 hns_mac_get_autoneg(mac_cb, auto_neg);
409 406
410 hns_mac_get_pauseparam(hns_get_mac_cb(handle), rx_en, tx_en); 407 hns_mac_get_pauseparam(mac_cb, rx_en, tx_en);
408
409 /* Service port's pause feature is provided by DSAF, not mac */
410 if (handle->port_type == HNAE_PORT_SERVICE)
411 hns_dsaf_get_rx_mac_pause_en(dsaf_dev, mac_cb->mac_id, rx_en);
411} 412}
412 413
413static int hns_ae_set_autoneg(struct hnae_handle *handle, u8 enable) 414static int hns_ae_set_autoneg(struct hnae_handle *handle, u8 enable)
@@ -419,7 +420,10 @@ static int hns_ae_set_autoneg(struct hnae_handle *handle, u8 enable)
419 420
420static void hns_ae_set_promisc_mode(struct hnae_handle *handle, u32 en) 421static void hns_ae_set_promisc_mode(struct hnae_handle *handle, u32 en)
421{ 422{
423 struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
424
422 hns_dsaf_set_promisc_mode(hns_ae_get_dsaf_dev(handle->dev), en); 425 hns_dsaf_set_promisc_mode(hns_ae_get_dsaf_dev(handle->dev), en);
426 hns_mac_set_promisc(mac_cb, (u8)!!en);
423} 427}
424 428
425static int hns_ae_get_autoneg(struct hnae_handle *handle) 429static int hns_ae_get_autoneg(struct hnae_handle *handle)
@@ -437,71 +441,67 @@ static int hns_ae_set_pauseparam(struct hnae_handle *handle,
437 u32 autoneg, u32 rx_en, u32 tx_en) 441 u32 autoneg, u32 rx_en, u32 tx_en)
438{ 442{
439 struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); 443 struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
444 struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
440 int ret; 445 int ret;
441 446
442 ret = hns_mac_set_autoneg(mac_cb, autoneg); 447 ret = hns_mac_set_autoneg(mac_cb, autoneg);
443 if (ret) 448 if (ret)
444 return ret; 449 return ret;
445 450
451 /* Service port's pause feature is provided by DSAF, not mac */
452 if (handle->port_type == HNAE_PORT_SERVICE) {
453 ret = hns_dsaf_set_rx_mac_pause_en(dsaf_dev,
454 mac_cb->mac_id, rx_en);
455 if (ret)
456 return ret;
457 rx_en = 0;
458 }
446 return hns_mac_set_pauseparam(mac_cb, rx_en, tx_en); 459 return hns_mac_set_pauseparam(mac_cb, rx_en, tx_en);
447} 460}
448 461
449static void hns_ae_get_coalesce_usecs(struct hnae_handle *handle, 462static void hns_ae_get_coalesce_usecs(struct hnae_handle *handle,
450 u32 *tx_usecs, u32 *rx_usecs) 463 u32 *tx_usecs, u32 *rx_usecs)
451{ 464{
452 int port; 465 struct ring_pair_cb *ring_pair =
453 466 container_of(handle->qs[0], struct ring_pair_cb, q);
454 port = hns_ae_map_eport_to_dport(handle->eport_id);
455 467
456 *tx_usecs = hns_rcb_get_coalesce_usecs( 468 *tx_usecs = hns_rcb_get_coalesce_usecs(ring_pair->rcb_common,
457 hns_ae_get_dsaf_dev(handle->dev), 469 ring_pair->port_id_in_comm);
458 hns_dsaf_get_comm_idx_by_port(port)); 470 *rx_usecs = hns_rcb_get_coalesce_usecs(ring_pair->rcb_common,
459 *rx_usecs = hns_rcb_get_coalesce_usecs( 471 ring_pair->port_id_in_comm);
460 hns_ae_get_dsaf_dev(handle->dev),
461 hns_dsaf_get_comm_idx_by_port(port));
462} 472}
463 473
464static void hns_ae_get_rx_max_coalesced_frames(struct hnae_handle *handle, 474static void hns_ae_get_rx_max_coalesced_frames(struct hnae_handle *handle,
465 u32 *tx_frames, u32 *rx_frames) 475 u32 *tx_frames, u32 *rx_frames)
466{ 476{
467 int port; 477 struct ring_pair_cb *ring_pair =
468 478 container_of(handle->qs[0], struct ring_pair_cb, q);
469 assert(handle);
470 479
471 port = hns_ae_map_eport_to_dport(handle->eport_id); 480 *tx_frames = hns_rcb_get_coalesced_frames(ring_pair->rcb_common,
472 481 ring_pair->port_id_in_comm);
473 *tx_frames = hns_rcb_get_coalesced_frames( 482 *rx_frames = hns_rcb_get_coalesced_frames(ring_pair->rcb_common,
474 hns_ae_get_dsaf_dev(handle->dev), port); 483 ring_pair->port_id_in_comm);
475 *rx_frames = hns_rcb_get_coalesced_frames(
476 hns_ae_get_dsaf_dev(handle->dev), port);
477} 484}
478 485
479static void hns_ae_set_coalesce_usecs(struct hnae_handle *handle, 486static int hns_ae_set_coalesce_usecs(struct hnae_handle *handle,
480 u32 timeout) 487 u32 timeout)
481{ 488{
482 int port; 489 struct ring_pair_cb *ring_pair =
483 490 container_of(handle->qs[0], struct ring_pair_cb, q);
484 assert(handle);
485
486 port = hns_ae_map_eport_to_dport(handle->eport_id);
487 491
488 hns_rcb_set_coalesce_usecs(hns_ae_get_dsaf_dev(handle->dev), 492 return hns_rcb_set_coalesce_usecs(
489 port, timeout); 493 ring_pair->rcb_common, ring_pair->port_id_in_comm, timeout);
490} 494}
491 495
492static int hns_ae_set_coalesce_frames(struct hnae_handle *handle, 496static int hns_ae_set_coalesce_frames(struct hnae_handle *handle,
493 u32 coalesce_frames) 497 u32 coalesce_frames)
494{ 498{
495 int port; 499 struct ring_pair_cb *ring_pair =
496 int ret; 500 container_of(handle->qs[0], struct ring_pair_cb, q);
497
498 assert(handle);
499 501
500 port = hns_ae_map_eport_to_dport(handle->eport_id); 502 return hns_rcb_set_coalesced_frames(
501 503 ring_pair->rcb_common,
502 ret = hns_rcb_set_coalesced_frames(hns_ae_get_dsaf_dev(handle->dev), 504 ring_pair->port_id_in_comm, coalesce_frames);
503 port, coalesce_frames);
504 return ret;
505} 505}
506 506
507void hns_ae_update_stats(struct hnae_handle *handle, 507void hns_ae_update_stats(struct hnae_handle *handle,
@@ -787,7 +787,8 @@ static int hns_ae_get_rss(struct hnae_handle *handle, u32 *indir, u8 *key,
787 memcpy(key, ppe_cb->rss_key, HNS_PPEV2_RSS_KEY_SIZE); 787 memcpy(key, ppe_cb->rss_key, HNS_PPEV2_RSS_KEY_SIZE);
788 788
789 /* update the current hash->queue mappings from the shadow RSS table */ 789 /* update the current hash->queue mappings from the shadow RSS table */
790 memcpy(indir, ppe_cb->rss_indir_table, HNS_PPEV2_RSS_IND_TBL_SIZE); 790 memcpy(indir, ppe_cb->rss_indir_table,
791 HNS_PPEV2_RSS_IND_TBL_SIZE * sizeof(*indir));
791 792
792 return 0; 793 return 0;
793} 794}
@@ -799,10 +800,11 @@ static int hns_ae_set_rss(struct hnae_handle *handle, const u32 *indir,
799 800
800 /* set the RSS Hash Key if specififed by the user */ 801 /* set the RSS Hash Key if specififed by the user */
801 if (key) 802 if (key)
802 hns_ppe_set_rss_key(ppe_cb, (int *)key); 803 hns_ppe_set_rss_key(ppe_cb, (u32 *)key);
803 804
804 /* update the shadow RSS table with user specified qids */ 805 /* update the shadow RSS table with user specified qids */
805 memcpy(ppe_cb->rss_indir_table, indir, HNS_PPEV2_RSS_IND_TBL_SIZE); 806 memcpy(ppe_cb->rss_indir_table, indir,
807 HNS_PPEV2_RSS_IND_TBL_SIZE * sizeof(*indir));
806 808
807 /* now update the hardware */ 809 /* now update the hardware */
808 hns_ppe_set_indir_table(ppe_cb, ppe_cb->rss_indir_table); 810 hns_ppe_set_indir_table(ppe_cb, ppe_cb->rss_indir_table);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index b8517b00e706..44abb08de155 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -290,6 +290,24 @@ static int hns_gmac_adjust_link(void *mac_drv, enum mac_speed speed,
290 return 0; 290 return 0;
291} 291}
292 292
293static void hns_gmac_set_uc_match(void *mac_drv, u16 en)
294{
295 struct mac_driver *drv = mac_drv;
296
297 dsaf_set_dev_bit(drv, GMAC_REC_FILT_CONTROL_REG,
298 GMAC_UC_MATCH_EN_B, !en);
299 dsaf_set_dev_bit(drv, GMAC_STATION_ADDR_HIGH_2_REG,
300 GMAC_ADDR_EN_B, !en);
301}
302
303static void hns_gmac_set_promisc(void *mac_drv, u8 en)
304{
305 struct mac_driver *drv = mac_drv;
306
307 if (drv->mac_cb->mac_type == HNAE_PORT_DEBUG)
308 hns_gmac_set_uc_match(mac_drv, en);
309}
310
293static void hns_gmac_init(void *mac_drv) 311static void hns_gmac_init(void *mac_drv)
294{ 312{
295 u32 port; 313 u32 port;
@@ -305,6 +323,8 @@ static void hns_gmac_init(void *mac_drv)
305 mdelay(10); 323 mdelay(10);
306 hns_gmac_disable(mac_drv, MAC_COMM_MODE_RX_AND_TX); 324 hns_gmac_disable(mac_drv, MAC_COMM_MODE_RX_AND_TX);
307 hns_gmac_tx_loop_pkt_dis(mac_drv); 325 hns_gmac_tx_loop_pkt_dis(mac_drv);
326 if (drv->mac_cb->mac_type == HNAE_PORT_DEBUG)
327 hns_gmac_set_uc_match(mac_drv, 0);
308} 328}
309 329
310void hns_gmac_update_stats(void *mac_drv) 330void hns_gmac_update_stats(void *mac_drv)
@@ -402,14 +422,17 @@ static void hns_gmac_set_mac_addr(void *mac_drv, char *mac_addr)
402{ 422{
403 struct mac_driver *drv = (struct mac_driver *)mac_drv; 423 struct mac_driver *drv = (struct mac_driver *)mac_drv;
404 424
405 if (drv->mac_id >= DSAF_SERVICE_NW_NUM) { 425 u32 high_val = mac_addr[1] | (mac_addr[0] << 8);
406 u32 high_val = mac_addr[1] | (mac_addr[0] << 8);
407 426
408 u32 low_val = mac_addr[5] | (mac_addr[4] << 8) 427 u32 low_val = mac_addr[5] | (mac_addr[4] << 8)
409 | (mac_addr[3] << 16) | (mac_addr[2] << 24); 428 | (mac_addr[3] << 16) | (mac_addr[2] << 24);
410 dsaf_write_dev(drv, GMAC_STATION_ADDR_LOW_2_REG, low_val); 429
411 dsaf_write_dev(drv, GMAC_STATION_ADDR_HIGH_2_REG, high_val); 430 u32 val = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_2_REG);
412 } 431 u32 sta_addr_en = dsaf_get_bit(val, GMAC_ADDR_EN_B);
432
433 dsaf_write_dev(drv, GMAC_STATION_ADDR_LOW_2_REG, low_val);
434 dsaf_write_dev(drv, GMAC_STATION_ADDR_HIGH_2_REG,
435 high_val | (sta_addr_en << GMAC_ADDR_EN_B));
413} 436}
414 437
415static int hns_gmac_config_loopback(void *mac_drv, enum hnae_loop loop_mode, 438static int hns_gmac_config_loopback(void *mac_drv, enum hnae_loop loop_mode,
@@ -641,7 +664,8 @@ static void hns_gmac_get_strings(u32 stringset, u8 *data)
641 return; 664 return;
642 665
643 for (i = 0; i < ARRAY_SIZE(g_gmac_stats_string); i++) { 666 for (i = 0; i < ARRAY_SIZE(g_gmac_stats_string); i++) {
644 snprintf(buff, ETH_GSTRING_LEN, g_gmac_stats_string[i].desc); 667 snprintf(buff, ETH_GSTRING_LEN, "%s",
668 g_gmac_stats_string[i].desc);
645 buff = buff + ETH_GSTRING_LEN; 669 buff = buff + ETH_GSTRING_LEN;
646 } 670 }
647} 671}
@@ -699,6 +723,7 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
699 mac_drv->get_sset_count = hns_gmac_get_sset_count; 723 mac_drv->get_sset_count = hns_gmac_get_sset_count;
700 mac_drv->get_strings = hns_gmac_get_strings; 724 mac_drv->get_strings = hns_gmac_get_strings;
701 mac_drv->update_stats = hns_gmac_update_stats; 725 mac_drv->update_stats = hns_gmac_update_stats;
726 mac_drv->set_promiscuous = hns_gmac_set_promisc;
702 727
703 return (void *)mac_drv; 728 return (void *)mac_drv;
704} 729}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 5ef0e96e918a..10c367d20955 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -439,9 +439,8 @@ int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, bool enable)
439 439
440void hns_mac_reset(struct hns_mac_cb *mac_cb) 440void hns_mac_reset(struct hns_mac_cb *mac_cb)
441{ 441{
442 struct mac_driver *drv; 442 struct mac_driver *drv = hns_mac_get_drv(mac_cb);
443 443 bool is_ver1 = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver);
444 drv = hns_mac_get_drv(mac_cb);
445 444
446 drv->mac_init(drv); 445 drv->mac_init(drv);
447 446
@@ -456,7 +455,7 @@ void hns_mac_reset(struct hns_mac_cb *mac_cb)
456 455
457 if (drv->mac_pausefrm_cfg) { 456 if (drv->mac_pausefrm_cfg) {
458 if (mac_cb->mac_type == HNAE_PORT_DEBUG) 457 if (mac_cb->mac_type == HNAE_PORT_DEBUG)
459 drv->mac_pausefrm_cfg(drv, 0, 0); 458 drv->mac_pausefrm_cfg(drv, !is_ver1, !is_ver1);
460 else /* mac rx must disable, dsaf pfc close instead of it*/ 459 else /* mac rx must disable, dsaf pfc close instead of it*/
461 drv->mac_pausefrm_cfg(drv, 0, 1); 460 drv->mac_pausefrm_cfg(drv, 0, 1);
462 } 461 }
@@ -467,8 +466,13 @@ int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu)
467 struct mac_driver *drv = hns_mac_get_drv(mac_cb); 466 struct mac_driver *drv = hns_mac_get_drv(mac_cb);
468 u32 buf_size = mac_cb->dsaf_dev->buf_size; 467 u32 buf_size = mac_cb->dsaf_dev->buf_size;
469 u32 new_frm = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 468 u32 new_frm = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
469 u32 max_frm = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver) ?
470 MAC_MAX_MTU : MAC_MAX_MTU_V2;
471
472 if (mac_cb->mac_type == HNAE_PORT_DEBUG)
473 max_frm = MAC_MAX_MTU_DBG;
470 474
471 if ((new_mtu < MAC_MIN_MTU) || (new_frm > MAC_MAX_MTU) || 475 if ((new_mtu < MAC_MIN_MTU) || (new_frm > max_frm) ||
472 (new_frm > HNS_RCB_RING_MAX_BD_PER_PKT * buf_size)) 476 (new_frm > HNS_RCB_RING_MAX_BD_PER_PKT * buf_size))
473 return -EINVAL; 477 return -EINVAL;
474 478
@@ -556,14 +560,6 @@ void hns_mac_get_pauseparam(struct hns_mac_cb *mac_cb, u32 *rx_en, u32 *tx_en)
556 *rx_en = 0; 560 *rx_en = 0;
557 *tx_en = 0; 561 *tx_en = 0;
558 } 562 }
559
560 /* Due to the chip defect, the service mac's rx pause CAN'T be enabled.
561 * We set the rx pause frm always be true (1), because DSAF deals with
562 * the rx pause frm instead of service mac. After all, we still support
563 * rx pause frm.
564 */
565 if (mac_cb->mac_type == HNAE_PORT_SERVICE)
566 *rx_en = 1;
567} 563}
568 564
569/** 565/**
@@ -597,20 +593,13 @@ int hns_mac_set_autoneg(struct hns_mac_cb *mac_cb, u8 enable)
597int hns_mac_set_pauseparam(struct hns_mac_cb *mac_cb, u32 rx_en, u32 tx_en) 593int hns_mac_set_pauseparam(struct hns_mac_cb *mac_cb, u32 rx_en, u32 tx_en)
598{ 594{
599 struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb); 595 struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
596 bool is_ver1 = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver);
600 597
601 if (mac_cb->mac_type == HNAE_PORT_SERVICE) { 598 if (mac_cb->mac_type == HNAE_PORT_DEBUG) {
602 if (!rx_en) { 599 if (is_ver1 && (tx_en || rx_en)) {
603 dev_err(mac_cb->dev, "disable rx_pause is not allowed!"); 600 dev_err(mac_cb->dev, "macv1 cann't enable tx/rx_pause!");
604 return -EINVAL;
605 }
606 } else if (mac_cb->mac_type == HNAE_PORT_DEBUG) {
607 if (tx_en || rx_en) {
608 dev_err(mac_cb->dev, "enable tx_pause or enable rx_pause are not allowed!");
609 return -EINVAL; 601 return -EINVAL;
610 } 602 }
611 } else {
612 dev_err(mac_cb->dev, "Unsupport this operation!");
613 return -EINVAL;
614 } 603 }
615 604
616 if (mac_ctrl_drv->mac_pausefrm_cfg) 605 if (mac_ctrl_drv->mac_pausefrm_cfg)
@@ -861,6 +850,14 @@ int hns_mac_get_sset_count(struct hns_mac_cb *mac_cb, int stringset)
861 return mac_ctrl_drv->get_sset_count(stringset); 850 return mac_ctrl_drv->get_sset_count(stringset);
862} 851}
863 852
853void hns_mac_set_promisc(struct hns_mac_cb *mac_cb, u8 en)
854{
855 struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
856
857 if (mac_ctrl_drv->set_promiscuous)
858 mac_ctrl_drv->set_promiscuous(mac_ctrl_drv, en);
859}
860
864int hns_mac_get_regs_count(struct hns_mac_cb *mac_cb) 861int hns_mac_get_regs_count(struct hns_mac_cb *mac_cb)
865{ 862{
866 struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb); 863 struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
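
Editor's note: hns_mac_set_mtu() now validates the requested MTU against a version- and port-dependent maximum frame size instead of the fixed MAC_MAX_MTU. Here is a stand-alone sketch of that check using the limits defined in hns_dsaf_mac.h from this diff; is_ver1/is_debug and the buffer-descriptor numbers are plain parameters here rather than fields read from mac_cb.

#include <stdbool.h>
#include <stdio.h>

#define ETH_HLEN	14
#define ETH_FCS_LEN	4
#define VLAN_HLEN	4
#define ETH_DATA_LEN	1500

#define MAC_DEFAULT_MTU	(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN + ETH_DATA_LEN)
#define MAC_MIN_MTU	68
#define MAC_MAX_MTU	9600
#define MAC_MAX_MTU_V2	9728
#define MAC_MAX_MTU_DBG	MAC_DEFAULT_MTU

static int check_mtu(unsigned new_mtu, bool is_ver1, bool is_debug,
		     unsigned bd_per_pkt, unsigned buf_size)
{
	unsigned new_frm = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	unsigned max_frm = is_ver1 ? MAC_MAX_MTU : MAC_MAX_MTU_V2;

	if (is_debug)
		max_frm = MAC_MAX_MTU_DBG;	/* debug ports stay at the default frame size */

	if (new_mtu < MAC_MIN_MTU || new_frm > max_frm ||
	    new_frm > bd_per_pkt * buf_size)	/* frame must also fit the ring's BD budget */
		return -1;			/* -EINVAL in the driver */
	return 0;
}

int main(void)
{
	/* 9000-byte jumbo MTU: rejected on a debug port, accepted on a v2 service port */
	printf("debug port:   %d\n", check_mtu(9000, false, true, 8, 2048));
	printf("service port: %d\n", check_mtu(9000, false, false, 8, 2048));
	return 0;
}
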
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index 0b052191d751..823b6e78c8aa 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -26,7 +26,9 @@ struct dsaf_device;
26 26
27#define MAC_DEFAULT_MTU (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN + ETH_DATA_LEN) 27#define MAC_DEFAULT_MTU (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN + ETH_DATA_LEN)
28#define MAC_MAX_MTU 9600 28#define MAC_MAX_MTU 9600
29#define MAC_MAX_MTU_V2 9728
29#define MAC_MIN_MTU 68 30#define MAC_MIN_MTU 68
31#define MAC_MAX_MTU_DBG MAC_DEFAULT_MTU
30 32
31#define MAC_DEFAULT_PAUSE_TIME 0xff 33#define MAC_DEFAULT_PAUSE_TIME 0xff
32 34
@@ -365,7 +367,7 @@ struct mac_driver {
365 /*config rx pause enable*/ 367 /*config rx pause enable*/
366 void (*set_rx_ignore_pause_frames)(void *mac_drv, u32 enable); 368 void (*set_rx_ignore_pause_frames)(void *mac_drv, u32 enable);
367 /* config rx mode for promiscuous*/ 369 /* config rx mode for promiscuous*/
368 int (*set_promiscuous)(void *mac_drv, u8 enable); 370 void (*set_promiscuous)(void *mac_drv, u8 enable);
369 /* get mac id */ 371 /* get mac id */
370 void (*mac_get_id)(void *mac_drv, u8 *mac_id); 372 void (*mac_get_id)(void *mac_drv, u8 *mac_id);
371 void (*mac_pausefrm_cfg)(void *mac_drv, u32 rx_en, u32 tx_en); 373 void (*mac_pausefrm_cfg)(void *mac_drv, u32 rx_en, u32 tx_en);
@@ -453,4 +455,6 @@ int hns_mac_get_regs_count(struct hns_mac_cb *mac_cb);
453void hns_set_led_opt(struct hns_mac_cb *mac_cb); 455void hns_set_led_opt(struct hns_mac_cb *mac_cb);
454int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb, 456int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb,
455 enum hnae_led_state status); 457 enum hnae_led_state status);
458void hns_mac_set_promisc(struct hns_mac_cb *mac_cb, u8 en);
459
456#endif /* _HNS_DSAF_MAC_H */ 460#endif /* _HNS_DSAF_MAC_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 38fc5be3870c..8439f6d8e360 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -748,8 +748,9 @@ static void hns_dsaf_tbl_stat_en(struct dsaf_device *dsaf_dev)
748 */ 748 */
749static void hns_dsaf_rocee_bp_en(struct dsaf_device *dsaf_dev) 749static void hns_dsaf_rocee_bp_en(struct dsaf_device *dsaf_dev)
750{ 750{
751 dsaf_set_dev_bit(dsaf_dev, DSAF_XGE_CTRL_SIG_CFG_0_REG, 751 if (AE_IS_VER1(dsaf_dev->dsaf_ver))
752 DSAF_FC_XGE_TX_PAUSE_S, 1); 752 dsaf_set_dev_bit(dsaf_dev, DSAF_XGE_CTRL_SIG_CFG_0_REG,
753 DSAF_FC_XGE_TX_PAUSE_S, 1);
753} 754}
754 755
755/* set msk for dsaf exception irq*/ 756/* set msk for dsaf exception irq*/
@@ -1021,12 +1022,52 @@ static void hns_dsaf_tbl_tcam_init(struct dsaf_device *dsaf_dev)
1021 * @mac_cb: mac contrl block 1022 * @mac_cb: mac contrl block
1022 */ 1023 */
1023static void hns_dsaf_pfc_en_cfg(struct dsaf_device *dsaf_dev, 1024static void hns_dsaf_pfc_en_cfg(struct dsaf_device *dsaf_dev,
1024 int mac_id, int en) 1025 int mac_id, int tc_en)
1026{
1027 dsaf_write_dev(dsaf_dev, DSAF_PFC_EN_0_REG + mac_id * 4, tc_en);
1028}
1029
1030static void hns_dsaf_set_pfc_pause(struct dsaf_device *dsaf_dev,
1031 int mac_id, int tx_en, int rx_en)
1032{
1033 if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
1034 if (!tx_en || !rx_en)
1035 dev_err(dsaf_dev->dev, "dsaf v1 can not close pfc!\n");
1036
1037 return;
1038 }
1039
1040 dsaf_set_dev_bit(dsaf_dev, DSAF_PAUSE_CFG_REG + mac_id * 4,
1041 DSAF_PFC_PAUSE_RX_EN_B, !!rx_en);
1042 dsaf_set_dev_bit(dsaf_dev, DSAF_PAUSE_CFG_REG + mac_id * 4,
1043 DSAF_PFC_PAUSE_TX_EN_B, !!tx_en);
1044}
1045
1046int hns_dsaf_set_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
1047 u32 en)
1048{
1049 if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
1050 if (!en)
1051 dev_err(dsaf_dev->dev, "dsafv1 can't close rx_pause!\n");
1052
1053 return -EINVAL;
1054 }
1055
1056 dsaf_set_dev_bit(dsaf_dev, DSAF_PAUSE_CFG_REG + mac_id * 4,
1057 DSAF_MAC_PAUSE_RX_EN_B, !!en);
1058
1059 return 0;
1060}
1061
1062void hns_dsaf_get_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
1063 u32 *en)
1025{ 1064{
1026 if (!en) 1065 if (AE_IS_VER1(dsaf_dev->dsaf_ver))
1027 dsaf_write_dev(dsaf_dev, DSAF_PFC_EN_0_REG + mac_id * 4, 0); 1066 *en = 1;
1028 else 1067 else
1029 dsaf_write_dev(dsaf_dev, DSAF_PFC_EN_0_REG + mac_id * 4, 0xff); 1068 *en = dsaf_get_dev_bit(dsaf_dev,
1069 DSAF_PAUSE_CFG_REG + mac_id * 4,
1070 DSAF_MAC_PAUSE_RX_EN_B);
1030} 1071}
1031 1072
1032/** 1073/**
@@ -1038,6 +1079,7 @@ static void hns_dsaf_comm_init(struct dsaf_device *dsaf_dev)
1038{ 1079{
1039 u32 i; 1080 u32 i;
1040 u32 o_dsaf_cfg; 1081 u32 o_dsaf_cfg;
1082 bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver);
1041 1083
1042 o_dsaf_cfg = dsaf_read_dev(dsaf_dev, DSAF_CFG_0_REG); 1084 o_dsaf_cfg = dsaf_read_dev(dsaf_dev, DSAF_CFG_0_REG);
1043 dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_EN_S, dsaf_dev->dsaf_en); 1085 dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_EN_S, dsaf_dev->dsaf_en);
@@ -1063,8 +1105,10 @@ static void hns_dsaf_comm_init(struct dsaf_device *dsaf_dev)
1063 hns_dsaf_sw_port_type_cfg(dsaf_dev, DSAF_SW_PORT_TYPE_NON_VLAN); 1105 hns_dsaf_sw_port_type_cfg(dsaf_dev, DSAF_SW_PORT_TYPE_NON_VLAN);
1064 1106
1065 /*set dsaf pfc to 0 for parseing rx pause*/ 1107 /*set dsaf pfc to 0 for parseing rx pause*/
1066 for (i = 0; i < DSAF_COMM_CHN; i++) 1108 for (i = 0; i < DSAF_COMM_CHN; i++) {
1067 hns_dsaf_pfc_en_cfg(dsaf_dev, i, 0); 1109 hns_dsaf_pfc_en_cfg(dsaf_dev, i, 0);
1110 hns_dsaf_set_pfc_pause(dsaf_dev, i, is_ver1, is_ver1);
1111 }
1068 1112
1069 /*msk and clr exception irqs */ 1113 /*msk and clr exception irqs */
1070 for (i = 0; i < DSAF_COMM_CHN; i++) { 1114 for (i = 0; i < DSAF_COMM_CHN; i++) {
@@ -2012,6 +2056,8 @@ void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num)
2012{ 2056{
2013 struct dsaf_hw_stats *hw_stats 2057 struct dsaf_hw_stats *hw_stats
2014 = &dsaf_dev->hw_stats[node_num]; 2058 = &dsaf_dev->hw_stats[node_num];
2059 bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver);
2060 u32 reg_tmp;
2015 2061
2016 hw_stats->pad_drop += dsaf_read_dev(dsaf_dev, 2062 hw_stats->pad_drop += dsaf_read_dev(dsaf_dev,
2017 DSAF_INODE_PAD_DISCARD_NUM_0_REG + 0x80 * (u64)node_num); 2063 DSAF_INODE_PAD_DISCARD_NUM_0_REG + 0x80 * (u64)node_num);
@@ -2021,8 +2067,12 @@ void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num)
2021 DSAF_INODE_FINAL_IN_PKT_NUM_0_REG + 0x80 * (u64)node_num); 2067 DSAF_INODE_FINAL_IN_PKT_NUM_0_REG + 0x80 * (u64)node_num);
2022 hw_stats->rx_pkt_id += dsaf_read_dev(dsaf_dev, 2068 hw_stats->rx_pkt_id += dsaf_read_dev(dsaf_dev,
2023 DSAF_INODE_SBM_PID_NUM_0_REG + 0x80 * (u64)node_num); 2069 DSAF_INODE_SBM_PID_NUM_0_REG + 0x80 * (u64)node_num);
2024 hw_stats->rx_pause_frame += dsaf_read_dev(dsaf_dev, 2070
2025 DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG + 0x80 * (u64)node_num); 2071 reg_tmp = is_ver1 ? DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG :
2072 DSAFV2_INODE_FINAL_IN_PAUSE_NUM_0_REG;
2073 hw_stats->rx_pause_frame +=
2074 dsaf_read_dev(dsaf_dev, reg_tmp + 0x80 * (u64)node_num);
2075
2026 hw_stats->release_buf_num += dsaf_read_dev(dsaf_dev, 2076 hw_stats->release_buf_num += dsaf_read_dev(dsaf_dev,
2027 DSAF_INODE_SBM_RELS_NUM_0_REG + 0x80 * (u64)node_num); 2077 DSAF_INODE_SBM_RELS_NUM_0_REG + 0x80 * (u64)node_num);
2028 hw_stats->sbm_drop += dsaf_read_dev(dsaf_dev, 2078 hw_stats->sbm_drop += dsaf_read_dev(dsaf_dev,
@@ -2055,6 +2105,8 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data)
2055 u32 i = 0; 2105 u32 i = 0;
2056 u32 j; 2106 u32 j;
2057 u32 *p = data; 2107 u32 *p = data;
2108 u32 reg_tmp;
2109 bool is_ver1 = AE_IS_VER1(ddev->dsaf_ver);
2058 2110
2059 /* dsaf common registers */ 2111 /* dsaf common registers */
2060 p[0] = dsaf_read_dev(ddev, DSAF_SRAM_INIT_OVER_0_REG); 2112 p[0] = dsaf_read_dev(ddev, DSAF_SRAM_INIT_OVER_0_REG);
@@ -2119,8 +2171,9 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data)
2119 DSAF_INODE_FINAL_IN_PKT_NUM_0_REG + j * 0x80); 2171 DSAF_INODE_FINAL_IN_PKT_NUM_0_REG + j * 0x80);
2120 p[190 + i] = dsaf_read_dev(ddev, 2172 p[190 + i] = dsaf_read_dev(ddev,
2121 DSAF_INODE_SBM_PID_NUM_0_REG + j * 0x80); 2173 DSAF_INODE_SBM_PID_NUM_0_REG + j * 0x80);
2122 p[193 + i] = dsaf_read_dev(ddev, 2174 reg_tmp = is_ver1 ? DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG :
2123 DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG + j * 0x80); 2175 DSAFV2_INODE_FINAL_IN_PAUSE_NUM_0_REG;
2176 p[193 + i] = dsaf_read_dev(ddev, reg_tmp + j * 0x80);
2124 p[196 + i] = dsaf_read_dev(ddev, 2177 p[196 + i] = dsaf_read_dev(ddev,
2125 DSAF_INODE_SBM_RELS_NUM_0_REG + j * 0x80); 2178 DSAF_INODE_SBM_RELS_NUM_0_REG + j * 0x80);
2126 p[199 + i] = dsaf_read_dev(ddev, 2179 p[199 + i] = dsaf_read_dev(ddev,
@@ -2218,17 +2271,17 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data)
2218 /* dsaf onode registers */ 2271 /* dsaf onode registers */
2219 for (i = 0; i < DSAF_XOD_NUM; i++) { 2272 for (i = 0; i < DSAF_XOD_NUM; i++) {
2220 p[311 + i] = dsaf_read_dev(ddev, 2273 p[311 + i] = dsaf_read_dev(ddev,
2221 DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG + j * 0x90); 2274 DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG + i * 0x90);
2222 p[319 + i] = dsaf_read_dev(ddev, 2275 p[319 + i] = dsaf_read_dev(ddev,
2223 DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG + j * 0x90); 2276 DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG + i * 0x90);
2224 p[327 + i] = dsaf_read_dev(ddev, 2277 p[327 + i] = dsaf_read_dev(ddev,
2225 DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG + j * 0x90); 2278 DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG + i * 0x90);
2226 p[335 + i] = dsaf_read_dev(ddev, 2279 p[335 + i] = dsaf_read_dev(ddev,
2227 DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG + j * 0x90); 2280 DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG + i * 0x90);
2228 p[343 + i] = dsaf_read_dev(ddev, 2281 p[343 + i] = dsaf_read_dev(ddev,
2229 DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG + j * 0x90); 2282 DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG + i * 0x90);
2230 p[351 + i] = dsaf_read_dev(ddev, 2283 p[351 + i] = dsaf_read_dev(ddev,
2231 DSAF_XOD_ETS_TOKEN_CFG_0_REG + j * 0x90); 2284 DSAF_XOD_ETS_TOKEN_CFG_0_REG + i * 0x90);
2232 } 2285 }
2233 2286
2234 p[359] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90); 2287 p[359] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90);
@@ -2367,8 +2420,11 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data)
2367 p[496] = dsaf_read_dev(ddev, DSAF_NETPORT_CTRL_SIG_0_REG + port * 0x4); 2420 p[496] = dsaf_read_dev(ddev, DSAF_NETPORT_CTRL_SIG_0_REG + port * 0x4);
2368 p[497] = dsaf_read_dev(ddev, DSAF_XGE_CTRL_SIG_CFG_0_REG + port * 0x4); 2421 p[497] = dsaf_read_dev(ddev, DSAF_XGE_CTRL_SIG_CFG_0_REG + port * 0x4);
2369 2422
2423 if (!is_ver1)
2424 p[498] = dsaf_read_dev(ddev, DSAF_PAUSE_CFG_REG + port * 0x4);
2425
2370 /* mark end of dsaf regs */ 2426 /* mark end of dsaf regs */
2371 for (i = 498; i < 504; i++) 2427 for (i = 499; i < 504; i++)
2372 p[i] = 0xdddddddd; 2428 p[i] = 0xdddddddd;
2373} 2429}
2374 2430
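
Editor's note: the new hns_dsaf_set_pfc_pause() and hns_dsaf_set_rx_mac_pause_en() helpers program one 32-bit register per MAC at DSAF_PAUSE_CFG_REG + mac_id * 4, with separate bits for PFC tx/rx and MAC rx pause. A small sketch of that layout follows, with a regs[] array assumed in place of the DSAF register space so it runs in user space; offsets and bit numbers are taken from hns_dsaf_reg.h in this diff.

#include <stdint.h>
#include <stdio.h>

#define DSAF_PAUSE_CFG_REG	0x240
#define DSAF_MAC_PAUSE_RX_EN_B	2
#define DSAF_PFC_PAUSE_RX_EN_B	1
#define DSAF_PFC_PAUSE_TX_EN_B	0

static uint32_t regs[0x1000 / 4];	/* fake register space, word indexed */

static void set_bit32(uint32_t off, int bit, int val)
{
	uint32_t *p = &regs[off / 4];

	*p = (*p & ~(1u << bit)) | ((uint32_t)(!!val) << bit);
}

static void set_pfc_pause(int mac_id, int tx_en, int rx_en)
{
	uint32_t off = DSAF_PAUSE_CFG_REG + mac_id * 4;

	set_bit32(off, DSAF_PFC_PAUSE_RX_EN_B, rx_en);
	set_bit32(off, DSAF_PFC_PAUSE_TX_EN_B, tx_en);
}

static void set_rx_mac_pause_en(int mac_id, int en)
{
	set_bit32(DSAF_PAUSE_CFG_REG + mac_id * 4, DSAF_MAC_PAUSE_RX_EN_B, en);
}

int main(void)
{
	set_pfc_pause(3, 1, 0);		/* port 3: PFC tx on, PFC rx off */
	set_rx_mac_pause_en(3, 1);	/* port 3: MAC rx pause on */
	printf("reg[0x%x] = 0x%x\n", DSAF_PAUSE_CFG_REG + 3 * 4,
	       (unsigned)regs[(DSAF_PAUSE_CFG_REG + 3 * 4) / 4]);
	return 0;
}
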
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index 5fea226efaf3..e8eedc571296 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -417,6 +417,11 @@ void hns_dsaf_get_strings(int stringset, u8 *data, int port);
417void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data); 417void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data);
418int hns_dsaf_get_regs_count(void); 418int hns_dsaf_get_regs_count(void);
419void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en); 419void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en);
420
421void hns_dsaf_get_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
422 u32 *en);
423int hns_dsaf_set_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
424 u32 en);
420void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en); 425void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en);
421 426
422#endif /* __HNS_DSAF_MAIN_H__ */ 427#endif /* __HNS_DSAF_MAIN_H__ */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 607c3be42241..e69b02287c44 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -244,31 +244,35 @@ void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val)
244 */ 244 */
245phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb) 245phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb)
246{ 246{
247 u32 hilink3_mode; 247 u32 mode;
248 u32 hilink4_mode; 248 u32 reg;
249 u32 shift;
250 bool is_ver1 = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver);
249 void __iomem *sys_ctl_vaddr = mac_cb->sys_ctl_vaddr; 251 void __iomem *sys_ctl_vaddr = mac_cb->sys_ctl_vaddr;
250 int dev_id = mac_cb->mac_id; 252 int mac_id = mac_cb->mac_id;
251 phy_interface_t phy_if = PHY_INTERFACE_MODE_NA; 253 phy_interface_t phy_if = PHY_INTERFACE_MODE_NA;
252 254
253 hilink3_mode = dsaf_read_reg(sys_ctl_vaddr, HNS_MAC_HILINK3_REG); 255 if (is_ver1 && (mac_id >= 6 && mac_id <= 7)) {
254 hilink4_mode = dsaf_read_reg(sys_ctl_vaddr, HNS_MAC_HILINK4_REG); 256 phy_if = PHY_INTERFACE_MODE_SGMII;
255 if (dev_id >= 0 && dev_id <= 3) { 257 } else if (mac_id >= 0 && mac_id <= 3) {
256 if (hilink4_mode == 0) 258 reg = is_ver1 ? HNS_MAC_HILINK4_REG : HNS_MAC_HILINK4V2_REG;
257 phy_if = PHY_INTERFACE_MODE_SGMII; 259 mode = dsaf_read_reg(sys_ctl_vaddr, reg);
258 else 260 /* mac_id 0, 1, 2, 3 ---> hilink4 lane 0, 1, 2, 3 */
261 shift = is_ver1 ? 0 : mac_id;
262 if (dsaf_get_bit(mode, shift))
259 phy_if = PHY_INTERFACE_MODE_XGMII; 263 phy_if = PHY_INTERFACE_MODE_XGMII;
260 } else if (dev_id >= 4 && dev_id <= 5) {
261 if (hilink3_mode == 0)
262 phy_if = PHY_INTERFACE_MODE_SGMII;
263 else 264 else
265 phy_if = PHY_INTERFACE_MODE_SGMII;
266 } else if (mac_id >= 4 && mac_id <= 7) {
267 reg = is_ver1 ? HNS_MAC_HILINK3_REG : HNS_MAC_HILINK3V2_REG;
268 mode = dsaf_read_reg(sys_ctl_vaddr, reg);
269 /* mac_id 4, 5, 6, 7 ---> hilink3 lane 2, 3, 0, 1 */
270 shift = is_ver1 ? 0 : mac_id <= 5 ? mac_id - 2 : mac_id - 6;
271 if (dsaf_get_bit(mode, shift))
264 phy_if = PHY_INTERFACE_MODE_XGMII; 272 phy_if = PHY_INTERFACE_MODE_XGMII;
265 } else { 273 else
266 phy_if = PHY_INTERFACE_MODE_SGMII; 274 phy_if = PHY_INTERFACE_MODE_SGMII;
267 } 275 }
268
269 dev_dbg(mac_cb->dev,
270 "hilink3_mode=%d, hilink4_mode=%d dev_id=%d, phy_if=%d\n",
271 hilink3_mode, hilink4_mode, dev_id, phy_if);
272 return phy_if; 276 return phy_if;
273} 277}
274 278
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index f302ef9073c6..ab27b3b14ca3 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -27,7 +27,7 @@ void hns_ppe_set_tso_enable(struct hns_ppe_cb *ppe_cb, u32 value)
27void hns_ppe_set_rss_key(struct hns_ppe_cb *ppe_cb, 27void hns_ppe_set_rss_key(struct hns_ppe_cb *ppe_cb,
28 const u32 rss_key[HNS_PPEV2_RSS_KEY_NUM]) 28 const u32 rss_key[HNS_PPEV2_RSS_KEY_NUM])
29{ 29{
30 int key_item = 0; 30 u32 key_item;
31 31
32 for (key_item = 0; key_item < HNS_PPEV2_RSS_KEY_NUM; key_item++) 32 for (key_item = 0; key_item < HNS_PPEV2_RSS_KEY_NUM; key_item++)
33 dsaf_write_dev(ppe_cb, PPEV2_RSS_KEY_REG + key_item * 0x4, 33 dsaf_write_dev(ppe_cb, PPEV2_RSS_KEY_REG + key_item * 0x4,
@@ -332,10 +332,12 @@ static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb)
332 /* clr and msk except irq*/ 332 /* clr and msk except irq*/
333 hns_ppe_exc_irq_en(ppe_cb, 0); 333 hns_ppe_exc_irq_en(ppe_cb, 0);
334 334
335 if (ppe_common_cb->ppe_mode == PPE_COMMON_MODE_DEBUG) 335 if (ppe_common_cb->ppe_mode == PPE_COMMON_MODE_DEBUG) {
336 hns_ppe_set_port_mode(ppe_cb, PPE_MODE_GE); 336 hns_ppe_set_port_mode(ppe_cb, PPE_MODE_GE);
337 else 337 dsaf_write_dev(ppe_cb, PPE_CFG_PAUSE_IDLE_CNT_REG, 0);
338 } else {
338 hns_ppe_set_port_mode(ppe_cb, PPE_MODE_XGE); 339 hns_ppe_set_port_mode(ppe_cb, PPE_MODE_XGE);
340 }
339 341
340 hns_ppe_checksum_hw(ppe_cb, 0xffffffff); 342 hns_ppe_checksum_hw(ppe_cb, 0xffffffff);
341 hns_ppe_cnt_clr_ce(ppe_cb); 343 hns_ppe_cnt_clr_ce(ppe_cb);
@@ -343,6 +345,9 @@ static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb)
343 if (!AE_IS_VER1(dsaf_dev->dsaf_ver)) { 345 if (!AE_IS_VER1(dsaf_dev->dsaf_ver)) {
344 hns_ppe_set_vlan_strip(ppe_cb, 0); 346 hns_ppe_set_vlan_strip(ppe_cb, 0);
345 347
348 dsaf_write_dev(ppe_cb, PPE_CFG_MAX_FRAME_LEN_REG,
349 HNS_PPEV2_MAX_FRAME_LEN);
350
346 /* set default RSS key in h/w */ 351 /* set default RSS key in h/w */
347 hns_ppe_set_rss_key(ppe_cb, ppe_cb->rss_key); 352 hns_ppe_set_rss_key(ppe_cb, ppe_cb->rss_key);
348 353
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
index 0f5cb6962acf..e9c0ec2fa0dd 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
@@ -30,6 +30,8 @@
30#define HNS_PPEV2_RSS_KEY_SIZE 40 /* in bytes or 320 bits */ 30#define HNS_PPEV2_RSS_KEY_SIZE 40 /* in bytes or 320 bits */
31#define HNS_PPEV2_RSS_KEY_NUM (HNS_PPEV2_RSS_KEY_SIZE / sizeof(u32)) 31#define HNS_PPEV2_RSS_KEY_NUM (HNS_PPEV2_RSS_KEY_SIZE / sizeof(u32))
32 32
33#define HNS_PPEV2_MAX_FRAME_LEN 0X980
34
33enum ppe_qid_mode { 35enum ppe_qid_mode {
34 PPE_QID_MODE0 = 0, /* fixed queue id mode */ 36 PPE_QID_MODE0 = 0, /* fixed queue id mode */
35 PPE_QID_MODE1, /* switch:128VM non switch:6Port/4VM/4TC */ 37 PPE_QID_MODE1, /* switch:128VM non switch:6Port/4VM/4TC */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 12188807468c..28ee26e5c478 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -215,9 +215,9 @@ static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
215 dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG, 215 dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG,
216 bd_size_type); 216 bd_size_type);
217 dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG, 217 dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG,
218 ring_pair->port_id_in_dsa); 218 ring_pair->port_id_in_comm);
219 dsaf_write_dev(q, RCB_RING_RX_RING_PKTLINE_REG, 219 dsaf_write_dev(q, RCB_RING_RX_RING_PKTLINE_REG,
220 ring_pair->port_id_in_dsa); 220 ring_pair->port_id_in_comm);
221 } else { 221 } else {
222 dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_L_REG, 222 dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_L_REG,
223 (u32)dma); 223 (u32)dma);
@@ -227,9 +227,9 @@ static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
227 dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG, 227 dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG,
228 bd_size_type); 228 bd_size_type);
229 dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG, 229 dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG,
230 ring_pair->port_id_in_dsa); 230 ring_pair->port_id_in_comm);
231 dsaf_write_dev(q, RCB_RING_TX_RING_PKTLINE_REG, 231 dsaf_write_dev(q, RCB_RING_TX_RING_PKTLINE_REG,
232 ring_pair->port_id_in_dsa); 232 ring_pair->port_id_in_comm);
233 } 233 }
234} 234}
235 235
@@ -256,50 +256,16 @@ static void hns_rcb_set_port_desc_cnt(struct rcb_common_cb *rcb_common,
256 desc_cnt); 256 desc_cnt);
257} 257}
258 258
259/** 259static void hns_rcb_set_port_timeout(
260 *hns_rcb_set_port_coalesced_frames - set rcb port coalesced frames 260 struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
261 *@rcb_common: rcb_common device
262 *@port_idx:port index
263 *@coalesced_frames:BD num for coalesced frames
264 */
265static int hns_rcb_set_port_coalesced_frames(struct rcb_common_cb *rcb_common,
266 u32 port_idx,
267 u32 coalesced_frames)
268{
269 if (coalesced_frames >= rcb_common->desc_num ||
270 coalesced_frames > HNS_RCB_MAX_COALESCED_FRAMES)
271 return -EINVAL;
272
273 dsaf_write_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4,
274 coalesced_frames);
275 return 0;
276}
277
278/**
279 *hns_rcb_get_port_coalesced_frames - set rcb port coalesced frames
280 *@rcb_common: rcb_common device
281 *@port_idx:port index
282 * return coaleseced frames value
283 */
284static u32 hns_rcb_get_port_coalesced_frames(struct rcb_common_cb *rcb_common,
285 u32 port_idx)
286{ 261{
287 if (port_idx >= HNS_RCB_SERVICE_NW_ENGINE_NUM) 262 if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver))
288 port_idx = 0; 263 dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG,
289 264 timeout * HNS_RCB_CLK_FREQ_MHZ);
290 return dsaf_read_dev(rcb_common, 265 else
291 RCB_CFG_PKTLINE_REG + port_idx * 4); 266 dsaf_write_dev(rcb_common,
292} 267 RCB_PORT_CFG_OVERTIME_REG + port_idx * 4,
293 268 timeout);
294/**
295 *hns_rcb_set_timeout - set rcb port coalesced time_out
296 *@rcb_common: rcb_common device
297 *@time_out:time for coalesced time_out
298 */
299static void hns_rcb_set_timeout(struct rcb_common_cb *rcb_common,
300 u32 timeout)
301{
302 dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG, timeout);
303} 269}
304 270
305static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common) 271static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common)
@@ -361,10 +327,11 @@ int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common)
361 327
362 for (i = 0; i < port_num; i++) { 328 for (i = 0; i < port_num; i++) {
363 hns_rcb_set_port_desc_cnt(rcb_common, i, rcb_common->desc_num); 329 hns_rcb_set_port_desc_cnt(rcb_common, i, rcb_common->desc_num);
364 (void)hns_rcb_set_port_coalesced_frames( 330 (void)hns_rcb_set_coalesced_frames(
365 rcb_common, i, rcb_common->coalesced_frames); 331 rcb_common, i, HNS_RCB_DEF_COALESCED_FRAMES);
332 hns_rcb_set_port_timeout(
333 rcb_common, i, HNS_RCB_DEF_COALESCED_USECS);
366 } 334 }
367 hns_rcb_set_timeout(rcb_common, rcb_common->timeout);
368 335
369 dsaf_write_dev(rcb_common, RCB_COM_CFG_ENDIAN_REG, 336 dsaf_write_dev(rcb_common, RCB_COM_CFG_ENDIAN_REG,
370 HNS_RCB_COMMON_ENDIAN); 337 HNS_RCB_COMMON_ENDIAN);
@@ -460,7 +427,8 @@ static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb)
460 hns_rcb_ring_get_cfg(&ring_pair_cb->q, TX_RING); 427 hns_rcb_ring_get_cfg(&ring_pair_cb->q, TX_RING);
461} 428}
462 429
463static int hns_rcb_get_port(struct rcb_common_cb *rcb_common, int ring_idx) 430static int hns_rcb_get_port_in_comm(
431 struct rcb_common_cb *rcb_common, int ring_idx)
464{ 432{
465 int comm_index = rcb_common->comm_index; 433 int comm_index = rcb_common->comm_index;
466 int port; 434 int port;
@@ -470,7 +438,7 @@ static int hns_rcb_get_port(struct rcb_common_cb *rcb_common, int ring_idx)
470 q_num = (int)rcb_common->max_q_per_vf * rcb_common->max_vfn; 438 q_num = (int)rcb_common->max_q_per_vf * rcb_common->max_vfn;
471 port = ring_idx / q_num; 439 port = ring_idx / q_num;
472 } else { 440 } else {
473 port = HNS_RCB_SERVICE_NW_ENGINE_NUM + comm_index - 1; 441 port = 0; /* config debug-ports port_id_in_comm to 0*/
474 } 442 }
475 443
476 return port; 444 return port;
@@ -518,7 +486,8 @@ void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
518 ring_pair_cb->index = i; 486 ring_pair_cb->index = i;
519 ring_pair_cb->q.io_base = 487 ring_pair_cb->q.io_base =
520 RCB_COMM_BASE_TO_RING_BASE(rcb_common->io_base, i); 488 RCB_COMM_BASE_TO_RING_BASE(rcb_common->io_base, i);
521 ring_pair_cb->port_id_in_dsa = hns_rcb_get_port(rcb_common, i); 489 ring_pair_cb->port_id_in_comm =
490 hns_rcb_get_port_in_comm(rcb_common, i);
522 ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] = 491 ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] =
523 is_ver1 ? irq_of_parse_and_map(np, base_irq_idx + i * 2) : 492 is_ver1 ? irq_of_parse_and_map(np, base_irq_idx + i * 2) :
524 platform_get_irq(pdev, base_irq_idx + i * 3 + 1); 493 platform_get_irq(pdev, base_irq_idx + i * 3 + 1);
@@ -534,82 +503,95 @@ void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
534/** 503/**
535 *hns_rcb_get_coalesced_frames - get rcb port coalesced frames 504 *hns_rcb_get_coalesced_frames - get rcb port coalesced frames
536 *@rcb_common: rcb_common device 505 *@rcb_common: rcb_common device
537 *@comm_index:port index 506 *@port_idx:port id in comm
538 *return coalesced_frames 507 *
508 *Returns: coalesced_frames
539 */ 509 */
540u32 hns_rcb_get_coalesced_frames(struct dsaf_device *dsaf_dev, int port) 510u32 hns_rcb_get_coalesced_frames(
511 struct rcb_common_cb *rcb_common, u32 port_idx)
541{ 512{
542 int comm_index = hns_dsaf_get_comm_idx_by_port(port); 513 return dsaf_read_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4);
543 struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
544
545 return hns_rcb_get_port_coalesced_frames(rcb_comm, port);
546} 514}
547 515
548/** 516/**
549 *hns_rcb_get_coalesce_usecs - get rcb port coalesced time_out 517 *hns_rcb_get_coalesce_usecs - get rcb port coalesced time_out
550 *@rcb_common: rcb_common device 518 *@rcb_common: rcb_common device
551 *@comm_index:port index 519 *@port_idx:port id in comm
552 *return time_out 520 *
521 *Returns: time_out
553 */ 522 */
554u32 hns_rcb_get_coalesce_usecs(struct dsaf_device *dsaf_dev, int comm_index) 523u32 hns_rcb_get_coalesce_usecs(
524 struct rcb_common_cb *rcb_common, u32 port_idx)
555{ 525{
556 struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index]; 526 if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver))
557 527 return dsaf_read_dev(rcb_common, RCB_CFG_OVERTIME_REG) /
558 return rcb_comm->timeout; 528 HNS_RCB_CLK_FREQ_MHZ;
529 else
530 return dsaf_read_dev(rcb_common,
531 RCB_PORT_CFG_OVERTIME_REG + port_idx * 4);
559} 532}
560 533
561/** 534/**
562 *hns_rcb_set_coalesce_usecs - set rcb port coalesced time_out 535 *hns_rcb_set_coalesce_usecs - set rcb port coalesced time_out
563 *@rcb_common: rcb_common device 536 *@rcb_common: rcb_common device
564 *@comm_index: comm :index 537 *@port_idx:port id in comm
565 *@etx_usecs:tx time for coalesced time_out 538 *@timeout:tx/rx time for coalesced time_out
566 *@rx_usecs:rx time for coalesced time_out 539 *
540 * Returns:
541 * Zero for success, or an error code in case of failure
567 */ 542 */
568void hns_rcb_set_coalesce_usecs(struct dsaf_device *dsaf_dev, 543int hns_rcb_set_coalesce_usecs(
569 int port, u32 timeout) 544 struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
570{ 545{
571 int comm_index = hns_dsaf_get_comm_idx_by_port(port); 546 u32 old_timeout = hns_rcb_get_coalesce_usecs(rcb_common, port_idx);
572 struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
573 547
574 if (rcb_comm->timeout == timeout) 548 if (timeout == old_timeout)
575 return; 549 return 0;
576 550
577 if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) { 551 if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
578 dev_err(dsaf_dev->dev, 552 if (rcb_common->comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
579 "error: not support coalesce_usecs setting!\n"); 553 dev_err(rcb_common->dsaf_dev->dev,
580 return; 554 "error: not support coalesce_usecs setting!\n");
555 return -EINVAL;
556 }
581 } 557 }
582 rcb_comm->timeout = timeout; 558 if (timeout > HNS_RCB_MAX_COALESCED_USECS) {
583 hns_rcb_set_timeout(rcb_comm, rcb_comm->timeout); 559 dev_err(rcb_common->dsaf_dev->dev,
560 "error: not support coalesce %dus!\n", timeout);
561 return -EINVAL;
562 }
563 hns_rcb_set_port_timeout(rcb_common, port_idx, timeout);
564 return 0;
584} 565}
585 566
586/** 567/**
587 *hns_rcb_set_coalesced_frames - set rcb coalesced frames 568 *hns_rcb_set_coalesced_frames - set rcb coalesced frames
588 *@rcb_common: rcb_common device 569 *@rcb_common: rcb_common device
589 *@tx_frames:tx BD num for coalesced frames 570 *@port_idx:port id in comm
590 *@rx_frames:rx BD num for coalesced frames 571 *@coalesced_frames:tx/rx BD num for coalesced frames
591 *Return 0 on success, negative on failure 572 *
573 * Returns:
574 * Zero for success, or an error code in case of failure
592 */ 575 */
593int hns_rcb_set_coalesced_frames(struct dsaf_device *dsaf_dev, 576int hns_rcb_set_coalesced_frames(
594 int port, u32 coalesced_frames) 577 struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames)
595{ 578{
596 int comm_index = hns_dsaf_get_comm_idx_by_port(port); 579 u32 old_waterline = hns_rcb_get_coalesced_frames(rcb_common, port_idx);
597 struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
598 u32 coalesced_reg_val;
599 int ret;
600 580
601 coalesced_reg_val = hns_rcb_get_port_coalesced_frames(rcb_comm, port); 581 if (coalesced_frames == old_waterline)
602
603 if (coalesced_reg_val == coalesced_frames)
604 return 0; 582 return 0;
605 583
606 if (coalesced_frames >= HNS_RCB_MIN_COALESCED_FRAMES) { 584 if (coalesced_frames >= rcb_common->desc_num ||
607 ret = hns_rcb_set_port_coalesced_frames(rcb_comm, port, 585 coalesced_frames > HNS_RCB_MAX_COALESCED_FRAMES ||
608 coalesced_frames); 586 coalesced_frames < HNS_RCB_MIN_COALESCED_FRAMES) {
609 return ret; 587 dev_err(rcb_common->dsaf_dev->dev,
610 } else { 588 "error: not support coalesce_frames setting!\n");
611 return -EINVAL; 589 return -EINVAL;
612 } 590 }
591
592 dsaf_write_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4,
593 coalesced_frames);
594 return 0;
613} 595}
614 596
615/** 597/**
@@ -749,8 +731,6 @@ int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev,
749 rcb_common->dsaf_dev = dsaf_dev; 731 rcb_common->dsaf_dev = dsaf_dev;
750 732
751 rcb_common->desc_num = dsaf_dev->desc_num; 733 rcb_common->desc_num = dsaf_dev->desc_num;
752 rcb_common->coalesced_frames = HNS_RCB_DEF_COALESCED_FRAMES;
753 rcb_common->timeout = HNS_RCB_MAX_TIME_OUT;
754 734
755 hns_rcb_get_queue_mode(dsaf_mode, comm_index, &max_vfn, &max_q_per_vf); 735 hns_rcb_get_queue_mode(dsaf_mode, comm_index, &max_vfn, &max_q_per_vf);
756 rcb_common->max_vfn = max_vfn; 736 rcb_common->max_vfn = max_vfn;
@@ -951,6 +931,10 @@ void hns_rcb_get_strings(int stringset, u8 *data, int index)
951void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data) 931void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
952{ 932{
953 u32 *regs = data; 933 u32 *regs = data;
934 bool is_ver1 = AE_IS_VER1(rcb_com->dsaf_dev->dsaf_ver);
935 bool is_dbg = (rcb_com->comm_index != HNS_DSAF_COMM_SERVICE_NW_IDX);
936 u32 reg_tmp;
937 u32 reg_num_tmp;
954 u32 i = 0; 938 u32 i = 0;
955 939
956 /*rcb common registers */ 940 /*rcb common registers */
@@ -1004,12 +988,16 @@ void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
1004 = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_REG + 4 * i); 988 = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_REG + 4 * i);
1005 } 989 }
1006 990
1007 regs[70] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_REG); 991 reg_tmp = is_ver1 ? RCB_CFG_OVERTIME_REG : RCB_PORT_CFG_OVERTIME_REG;
1008 regs[71] = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_INT_NUM_REG); 992 reg_num_tmp = (is_ver1 || is_dbg) ? 1 : 6;
1009 regs[72] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_INT_NUM_REG); 993 for (i = 0; i < reg_num_tmp; i++)
994 regs[70 + i] = dsaf_read_dev(rcb_com, reg_tmp);
995
996 regs[76] = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_INT_NUM_REG);
997 regs[77] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_INT_NUM_REG);
1010 998
1011 /* mark end of rcb common regs */ 999 /* mark end of rcb common regs */
1012 for (i = 73; i < 80; i++) 1000 for (i = 78; i < 80; i++)
1013 regs[i] = 0xcccccccc; 1001 regs[i] = 0xcccccccc;
1014} 1002}
1015 1003
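
Editor's note: the coalescing rework above replaces the stored timeout with direct register access: v1 keeps one global RCB_CFG_OVERTIME_REG programmed in 350 MHz clock ticks, while v2 gains a per-port RCB_PORT_CFG_OVERTIME_REG programmed in microseconds and bounded by HNS_RCB_MAX_COALESCED_USECS. A compilable sketch of both paths, with regs[] assumed in place of MMIO and register "offsets" reduced to array indices:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HNS_RCB_CLK_FREQ_MHZ		350
#define HNS_RCB_MAX_COALESCED_USECS	0x3ff
#define RCB_CFG_OVERTIME_REG		0	/* index into the fake register bank */
#define RCB_PORT_CFG_OVERTIME_REG	1	/* + port_idx */

static uint32_t regs[16];

static int set_coalesce_usecs(bool is_ver1, unsigned port, uint32_t usecs)
{
	if (usecs > HNS_RCB_MAX_COALESCED_USECS)
		return -1;			/* -EINVAL in the driver */
	if (is_ver1)
		regs[RCB_CFG_OVERTIME_REG] = usecs * HNS_RCB_CLK_FREQ_MHZ;
	else
		regs[RCB_PORT_CFG_OVERTIME_REG + port] = usecs;
	return 0;
}

static uint32_t get_coalesce_usecs(bool is_ver1, unsigned port)
{
	if (is_ver1)
		return regs[RCB_CFG_OVERTIME_REG] / HNS_RCB_CLK_FREQ_MHZ;
	return regs[RCB_PORT_CFG_OVERTIME_REG + port];
}

int main(void)
{
	set_coalesce_usecs(true, 0, 3);		/* v1: 3us stored as 1050 clock ticks */
	set_coalesce_usecs(false, 2, 3);	/* v2: per-port, raw microseconds */
	printf("v1 readback: %u us, v2 port2 readback: %u us\n",
	       (unsigned)get_coalesce_usecs(true, 0),
	       (unsigned)get_coalesce_usecs(false, 2));
	return 0;
}
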
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
index 81fe9f849973..eb61014ad615 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
@@ -38,7 +38,9 @@ struct rcb_common_cb;
38#define HNS_RCB_MAX_COALESCED_FRAMES 1023 38#define HNS_RCB_MAX_COALESCED_FRAMES 1023
39#define HNS_RCB_MIN_COALESCED_FRAMES 1 39#define HNS_RCB_MIN_COALESCED_FRAMES 1
40#define HNS_RCB_DEF_COALESCED_FRAMES 50 40#define HNS_RCB_DEF_COALESCED_FRAMES 50
41#define HNS_RCB_MAX_TIME_OUT 0x500 41#define HNS_RCB_CLK_FREQ_MHZ 350
42#define HNS_RCB_MAX_COALESCED_USECS 0x3ff
43#define HNS_RCB_DEF_COALESCED_USECS 3
42 44
43#define HNS_RCB_COMMON_ENDIAN 1 45#define HNS_RCB_COMMON_ENDIAN 1
44 46
@@ -82,7 +84,7 @@ struct ring_pair_cb {
82 84
83 int virq[HNS_RCB_IRQ_NUM_PER_QUEUE]; 85 int virq[HNS_RCB_IRQ_NUM_PER_QUEUE];
84 86
85 u8 port_id_in_dsa; 87 u8 port_id_in_comm;
86 u8 used_by_vf; 88 u8 used_by_vf;
87 89
88 struct hns_ring_hw_stats hw_stats; 90 struct hns_ring_hw_stats hw_stats;
@@ -97,8 +99,6 @@ struct rcb_common_cb {
97 99
98 u8 comm_index; 100 u8 comm_index;
99 u32 ring_num; 101 u32 ring_num;
100 u32 coalesced_frames; /* frames threshold of rx interrupt */
101 u32 timeout; /* time threshold of rx interrupt */
102 u32 desc_num; /* desc num per queue*/ 102 u32 desc_num; /* desc num per queue*/
103 103
104 struct ring_pair_cb ring_pair_cb[0]; 104 struct ring_pair_cb ring_pair_cb[0];
@@ -125,13 +125,14 @@ void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag);
125void hns_rcb_init_hw(struct ring_pair_cb *ring); 125void hns_rcb_init_hw(struct ring_pair_cb *ring);
126void hns_rcb_reset_ring_hw(struct hnae_queue *q); 126void hns_rcb_reset_ring_hw(struct hnae_queue *q);
127void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag); 127void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag);
128 128u32 hns_rcb_get_coalesced_frames(
129u32 hns_rcb_get_coalesced_frames(struct dsaf_device *dsaf_dev, int comm_index); 129 struct rcb_common_cb *rcb_common, u32 port_idx);
130u32 hns_rcb_get_coalesce_usecs(struct dsaf_device *dsaf_dev, int comm_index); 130u32 hns_rcb_get_coalesce_usecs(
131void hns_rcb_set_coalesce_usecs(struct dsaf_device *dsaf_dev, 131 struct rcb_common_cb *rcb_common, u32 port_idx);
132 int comm_index, u32 timeout); 132int hns_rcb_set_coalesce_usecs(
133int hns_rcb_set_coalesced_frames(struct dsaf_device *dsaf_dev, 133 struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout);
134 int comm_index, u32 coalesce_frames); 134int hns_rcb_set_coalesced_frames(
135 struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames);
135void hns_rcb_update_stats(struct hnae_queue *queue); 136void hns_rcb_update_stats(struct hnae_queue *queue);
136 137
137void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data); 138void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index 60d695daa471..7ff195e60b02 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -103,6 +103,8 @@
103/*serdes offset**/ 103/*serdes offset**/
104#define HNS_MAC_HILINK3_REG DSAF_SUB_SC_HILINK3_CRG_CTRL0_REG 104#define HNS_MAC_HILINK3_REG DSAF_SUB_SC_HILINK3_CRG_CTRL0_REG
105#define HNS_MAC_HILINK4_REG DSAF_SUB_SC_HILINK4_CRG_CTRL0_REG 105#define HNS_MAC_HILINK4_REG DSAF_SUB_SC_HILINK4_CRG_CTRL0_REG
106#define HNS_MAC_HILINK3V2_REG DSAF_SUB_SC_HILINK3_CRG_CTRL1_REG
107#define HNS_MAC_HILINK4V2_REG DSAF_SUB_SC_HILINK4_CRG_CTRL1_REG
106#define HNS_MAC_LANE0_CTLEDFE_REG 0x000BFFCCULL 108#define HNS_MAC_LANE0_CTLEDFE_REG 0x000BFFCCULL
107#define HNS_MAC_LANE1_CTLEDFE_REG 0x000BFFBCULL 109#define HNS_MAC_LANE1_CTLEDFE_REG 0x000BFFBCULL
108#define HNS_MAC_LANE2_CTLEDFE_REG 0x000BFFACULL 110#define HNS_MAC_LANE2_CTLEDFE_REG 0x000BFFACULL
@@ -135,6 +137,7 @@
135#define DSAF_PPE_INT_STS_0_REG 0x1E0 137#define DSAF_PPE_INT_STS_0_REG 0x1E0
136#define DSAF_ROCEE_INT_STS_0_REG 0x200 138#define DSAF_ROCEE_INT_STS_0_REG 0x200
137#define DSAFV2_SERDES_LBK_0_REG 0x220 139#define DSAFV2_SERDES_LBK_0_REG 0x220
140#define DSAF_PAUSE_CFG_REG 0x240
138#define DSAF_PPE_QID_CFG_0_REG 0x300 141#define DSAF_PPE_QID_CFG_0_REG 0x300
139#define DSAF_SW_PORT_TYPE_0_REG 0x320 142#define DSAF_SW_PORT_TYPE_0_REG 0x320
140#define DSAF_STP_PORT_TYPE_0_REG 0x340 143#define DSAF_STP_PORT_TYPE_0_REG 0x340
@@ -153,6 +156,7 @@
153#define DSAF_INODE_FINAL_IN_PKT_NUM_0_REG 0x1030 156#define DSAF_INODE_FINAL_IN_PKT_NUM_0_REG 0x1030
154#define DSAF_INODE_SBM_PID_NUM_0_REG 0x1038 157#define DSAF_INODE_SBM_PID_NUM_0_REG 0x1038
155#define DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG 0x103C 158#define DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG 0x103C
159#define DSAFV2_INODE_FINAL_IN_PAUSE_NUM_0_REG 0x1024
156#define DSAF_INODE_SBM_RELS_NUM_0_REG 0x104C 160#define DSAF_INODE_SBM_RELS_NUM_0_REG 0x104C
157#define DSAF_INODE_SBM_DROP_NUM_0_REG 0x1050 161#define DSAF_INODE_SBM_DROP_NUM_0_REG 0x1050
158#define DSAF_INODE_CRC_FALSE_NUM_0_REG 0x1054 162#define DSAF_INODE_CRC_FALSE_NUM_0_REG 0x1054
@@ -404,6 +408,7 @@
404#define RCB_CFG_OVERTIME_REG 0x9300 408#define RCB_CFG_OVERTIME_REG 0x9300
405#define RCB_CFG_PKTLINE_INT_NUM_REG 0x9304 409#define RCB_CFG_PKTLINE_INT_NUM_REG 0x9304
406#define RCB_CFG_OVERTIME_INT_NUM_REG 0x9308 410#define RCB_CFG_OVERTIME_INT_NUM_REG 0x9308
411#define RCB_PORT_CFG_OVERTIME_REG 0x9430
407 412
408#define RCB_RING_RX_RING_BASEADDR_L_REG 0x00000 413#define RCB_RING_RX_RING_BASEADDR_L_REG 0x00000
409#define RCB_RING_RX_RING_BASEADDR_H_REG 0x00004 414#define RCB_RING_RX_RING_BASEADDR_H_REG 0x00004
@@ -708,6 +713,10 @@
708#define DSAF_PFC_UNINT_CNT_M ((1ULL << 9) - 1) 713#define DSAF_PFC_UNINT_CNT_M ((1ULL << 9) - 1)
709#define DSAF_PFC_UNINT_CNT_S 0 714#define DSAF_PFC_UNINT_CNT_S 0
710 715
716#define DSAF_MAC_PAUSE_RX_EN_B 2
717#define DSAF_PFC_PAUSE_RX_EN_B 1
718#define DSAF_PFC_PAUSE_TX_EN_B 0
719
711#define DSAF_PPE_QID_CFG_M 0xFF 720#define DSAF_PPE_QID_CFG_M 0xFF
712#define DSAF_PPE_QID_CFG_S 0 721#define DSAF_PPE_QID_CFG_S 0
713 722
@@ -922,6 +931,8 @@
922#define GMAC_LP_REG_CF2MI_LP_EN_B 2 931#define GMAC_LP_REG_CF2MI_LP_EN_B 2
923 932
924#define GMAC_MODE_CHANGE_EB_B 0 933#define GMAC_MODE_CHANGE_EB_B 0
934#define GMAC_UC_MATCH_EN_B 0
935#define GMAC_ADDR_EN_B 16
925 936
926#define GMAC_RECV_CTRL_STRIP_PAD_EN_B 3 937#define GMAC_RECV_CTRL_STRIP_PAD_EN_B 3
927#define GMAC_RECV_CTRL_RUNT_PKT_EN_B 4 938#define GMAC_RECV_CTRL_RUNT_PKT_EN_B 4
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
index 802d55457f19..fd90f3737963 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -7,7 +7,7 @@
7 * (at your option) any later version. 7 * (at your option) any later version.
8 */ 8 */
9 9
10#include <asm-generic/io-64-nonatomic-hi-lo.h> 10#include <linux/io-64-nonatomic-hi-lo.h>
11#include <linux/of_mdio.h> 11#include <linux/of_mdio.h>
12#include "hns_dsaf_main.h" 12#include "hns_dsaf_main.h"
13#include "hns_dsaf_mac.h" 13#include "hns_dsaf_mac.h"
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 3f77ff77abbc..687204b780b0 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -48,7 +48,6 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
48 struct iphdr *iphdr; 48 struct iphdr *iphdr;
49 struct ipv6hdr *ipv6hdr; 49 struct ipv6hdr *ipv6hdr;
50 struct sk_buff *skb; 50 struct sk_buff *skb;
51 int skb_tmp_len;
52 __be16 protocol; 51 __be16 protocol;
53 u8 bn_pid = 0; 52 u8 bn_pid = 0;
54 u8 rrcfv = 0; 53 u8 rrcfv = 0;
@@ -66,10 +65,14 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
66 desc->addr = cpu_to_le64(dma); 65 desc->addr = cpu_to_le64(dma);
67 desc->tx.send_size = cpu_to_le16((u16)size); 66 desc->tx.send_size = cpu_to_le16((u16)size);
68 67
69 /*config bd buffer end */ 68 /* config bd buffer end */
70 hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1); 69 hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
71 hnae_set_field(bn_pid, HNSV2_TXD_BUFNUM_M, 0, buf_num - 1); 70 hnae_set_field(bn_pid, HNSV2_TXD_BUFNUM_M, 0, buf_num - 1);
72 71
72 /* fill port_id in the tx bd for sending management pkts */
73 hnae_set_field(bn_pid, HNSV2_TXD_PORTID_M,
74 HNSV2_TXD_PORTID_S, ring->q->handle->dport_id);
75
73 if (type == DESC_TYPE_SKB) { 76 if (type == DESC_TYPE_SKB) {
74 skb = (struct sk_buff *)priv; 77 skb = (struct sk_buff *)priv;
75 78
@@ -90,13 +93,13 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
90 hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1); 93 hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);
91 94
92 /* check for tcp/udp header */ 95 /* check for tcp/udp header */
93 if (iphdr->protocol == IPPROTO_TCP) { 96 if (iphdr->protocol == IPPROTO_TCP &&
97 skb_is_gso(skb)) {
94 hnae_set_bit(tvsvsn, 98 hnae_set_bit(tvsvsn,
95 HNSV2_TXD_TSE_B, 1); 99 HNSV2_TXD_TSE_B, 1);
96 skb_tmp_len = SKB_TMP_LEN(skb);
97 l4_len = tcp_hdrlen(skb); 100 l4_len = tcp_hdrlen(skb);
98 mss = mtu - skb_tmp_len - ETH_FCS_LEN; 101 mss = skb_shinfo(skb)->gso_size;
99 paylen = skb->len - skb_tmp_len; 102 paylen = skb->len - SKB_TMP_LEN(skb);
100 } 103 }
101 } else if (skb->protocol == htons(ETH_P_IPV6)) { 104 } else if (skb->protocol == htons(ETH_P_IPV6)) {
102 hnae_set_bit(tvsvsn, HNSV2_TXD_IPV6_B, 1); 105 hnae_set_bit(tvsvsn, HNSV2_TXD_IPV6_B, 1);
@@ -104,13 +107,13 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
104 hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1); 107 hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);
105 108
106 /* check for tcp/udp header */ 109 /* check for tcp/udp header */
107 if (ipv6hdr->nexthdr == IPPROTO_TCP) { 110 if (ipv6hdr->nexthdr == IPPROTO_TCP &&
111 skb_is_gso(skb) && skb_is_gso_v6(skb)) {
108 hnae_set_bit(tvsvsn, 112 hnae_set_bit(tvsvsn,
109 HNSV2_TXD_TSE_B, 1); 113 HNSV2_TXD_TSE_B, 1);
110 skb_tmp_len = SKB_TMP_LEN(skb);
111 l4_len = tcp_hdrlen(skb); 114 l4_len = tcp_hdrlen(skb);
112 mss = mtu - skb_tmp_len - ETH_FCS_LEN; 115 mss = skb_shinfo(skb)->gso_size;
113 paylen = skb->len - skb_tmp_len; 116 paylen = skb->len - SKB_TMP_LEN(skb);
114 } 117 }
115 } 118 }
116 desc->tx.ip_offset = ip_offset; 119 desc->tx.ip_offset = ip_offset;
@@ -564,6 +567,7 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
564 struct sk_buff *skb; 567 struct sk_buff *skb;
565 struct hnae_desc *desc; 568 struct hnae_desc *desc;
566 struct hnae_desc_cb *desc_cb; 569 struct hnae_desc_cb *desc_cb;
570 struct ethhdr *eh;
567 unsigned char *va; 571 unsigned char *va;
568 int bnum, length, i; 572 int bnum, length, i;
569 int pull_len; 573 int pull_len;
@@ -670,6 +674,14 @@ out_bnum_err:
670 return -EFAULT; 674 return -EFAULT;
671 } 675 }
672 676
677 /* filter out multicast pkt with the same src mac as this port */
678 eh = eth_hdr(skb);
679 if (unlikely(is_multicast_ether_addr(eh->h_dest) &&
680 ether_addr_equal(ndev->dev_addr, eh->h_source))) {
681 dev_kfree_skb_any(skb);
682 return -EFAULT;
683 }
684
673 ring->stats.rx_pkts++; 685 ring->stats.rx_pkts++;
674 ring->stats.rx_bytes += skb->len; 686 ring->stats.rx_bytes += skb->len;
675 687
@@ -901,10 +913,7 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
901static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data) 913static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
902{ 914{
903 struct hnae_ring *ring = ring_data->ring; 915 struct hnae_ring *ring = ring_data->ring;
904 int head = ring->next_to_clean; 916 int head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
905
906 /* for hardware bug fixed */
907 head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
908 917
909 if (head != ring->next_to_clean) { 918 if (head != ring->next_to_clean) {
910 ring_data->ring->q->handle->dev->ops->toggle_ring_irq( 919 ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
@@ -947,8 +956,8 @@ static int hns_nic_common_poll(struct napi_struct *napi, int budget)
947 napi_complete(napi); 956 napi_complete(napi);
948 ring_data->ring->q->handle->dev->ops->toggle_ring_irq( 957 ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
949 ring_data->ring, 0); 958 ring_data->ring, 0);
950 959 if (ring_data->fini_process)
951 ring_data->fini_process(ring_data); 960 ring_data->fini_process(ring_data);
952 return 0; 961 return 0;
953 } 962 }
954 963
@@ -1711,6 +1720,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
1711{ 1720{
1712 struct hnae_handle *h = priv->ae_handle; 1721 struct hnae_handle *h = priv->ae_handle;
1713 struct hns_nic_ring_data *rd; 1722 struct hns_nic_ring_data *rd;
1723 bool is_ver1 = AE_IS_VER1(priv->enet_ver);
1714 int i; 1724 int i;
1715 1725
1716 if (h->q_num > NIC_MAX_Q_PER_VF) { 1726 if (h->q_num > NIC_MAX_Q_PER_VF) {
@@ -1728,7 +1738,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
1728 rd->queue_index = i; 1738 rd->queue_index = i;
1729 rd->ring = &h->qs[i]->tx_ring; 1739 rd->ring = &h->qs[i]->tx_ring;
1730 rd->poll_one = hns_nic_tx_poll_one; 1740 rd->poll_one = hns_nic_tx_poll_one;
1731 rd->fini_process = hns_nic_tx_fini_pro; 1741 rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro : NULL;
1732 1742
1733 netif_napi_add(priv->netdev, &rd->napi, 1743 netif_napi_add(priv->netdev, &rd->napi,
1734 hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM); 1744 hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
@@ -1740,7 +1750,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
1740 rd->ring = &h->qs[i - h->q_num]->rx_ring; 1750 rd->ring = &h->qs[i - h->q_num]->rx_ring;
1741 rd->poll_one = hns_nic_rx_poll_one; 1751 rd->poll_one = hns_nic_rx_poll_one;
1742 rd->ex_process = hns_nic_rx_up_pro; 1752 rd->ex_process = hns_nic_rx_up_pro;
1743 rd->fini_process = hns_nic_rx_fini_pro; 1753 rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro : NULL;
1744 1754
1745 netif_napi_add(priv->netdev, &rd->napi, 1755 netif_napi_add(priv->netdev, &rd->napi,
1746 hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM); 1756 hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
@@ -1804,7 +1814,7 @@ static int hns_nic_try_get_ae(struct net_device *ndev)
1804 h = hnae_get_handle(&priv->netdev->dev, 1814 h = hnae_get_handle(&priv->netdev->dev,
1805 priv->ae_node, priv->port_id, NULL); 1815 priv->ae_node, priv->port_id, NULL);
1806 if (IS_ERR_OR_NULL(h)) { 1816 if (IS_ERR_OR_NULL(h)) {
1807 ret = PTR_ERR(h); 1817 ret = -ENODEV;
1808 dev_dbg(priv->dev, "has not handle, register notifier!\n"); 1818 dev_dbg(priv->dev, "has not handle, register notifier!\n");
1809 goto out; 1819 goto out;
1810 } 1820 }
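
Editor's note: among the hns_enet.c changes, the rx path gains a filter that drops multicast frames whose source MAC matches the port's own address (its own packets reflected back by the fabric). A user-space sketch of that check follows; is_multicast()/addr_equal() only approximate the kernel's ether-address helpers for the purpose of the demonstration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static bool is_multicast(const uint8_t *addr)
{
	return addr[0] & 0x01;		/* I/G bit of the first octet */
}

static bool addr_equal(const uint8_t *a, const uint8_t *b)
{
	return memcmp(a, b, 6) == 0;
}

/* returns true if the frame should be dropped */
static bool drop_own_multicast(const uint8_t *dst, const uint8_t *src,
			       const uint8_t *port_mac)
{
	return is_multicast(dst) && addr_equal(src, port_mac);
}

int main(void)
{
	const uint8_t port_mac[6]  = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
	const uint8_t mcast_dst[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	printf("own multicast dropped: %d\n",
	       drop_own_multicast(mcast_dst, port_mac, port_mac));
	return 0;
}
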
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 3c4a3bc31a89..3d746c887873 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -794,8 +794,10 @@ static int hns_set_coalesce(struct net_device *net_dev,
794 (!ops->set_coalesce_frames)) 794 (!ops->set_coalesce_frames))
795 return -ESRCH; 795 return -ESRCH;
796 796
797 ops->set_coalesce_usecs(priv->ae_handle, 797 ret = ops->set_coalesce_usecs(priv->ae_handle,
798 ec->rx_coalesce_usecs); 798 ec->rx_coalesce_usecs);
799 if (ret)
800 return ret;
799 801
800 ret = ops->set_coalesce_frames( 802 ret = ops->set_coalesce_frames(
801 priv->ae_handle, 803 priv->ae_handle,
@@ -1013,8 +1015,8 @@ int hns_phy_led_set(struct net_device *netdev, int value)
1013 struct phy_device *phy_dev = priv->phy; 1015 struct phy_device *phy_dev = priv->phy;
1014 1016
1015 retval = phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_LED); 1017 retval = phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_LED);
1016 retval = phy_write(phy_dev, HNS_LED_FC_REG, value); 1018 retval |= phy_write(phy_dev, HNS_LED_FC_REG, value);
1017 retval = phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_COPPER); 1019 retval |= phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_COPPER);
1018 if (retval) { 1020 if (retval) {
1019 netdev_err(netdev, "mdiobus_write fail !\n"); 1021 netdev_err(netdev, "mdiobus_write fail !\n");
1020 return retval; 1022 return retval;
@@ -1173,18 +1175,15 @@ hns_get_rss_key_size(struct net_device *netdev)
1173{ 1175{
1174 struct hns_nic_priv *priv = netdev_priv(netdev); 1176 struct hns_nic_priv *priv = netdev_priv(netdev);
1175 struct hnae_ae_ops *ops; 1177 struct hnae_ae_ops *ops;
1176 u32 ret;
1177 1178
1178 if (AE_IS_VER1(priv->enet_ver)) { 1179 if (AE_IS_VER1(priv->enet_ver)) {
1179 netdev_err(netdev, 1180 netdev_err(netdev,
1180 "RSS feature is not supported on this hardware\n"); 1181 "RSS feature is not supported on this hardware\n");
1181 return -EOPNOTSUPP; 1182 return 0;
1182 } 1183 }
1183 1184
1184 ops = priv->ae_handle->dev->ops; 1185 ops = priv->ae_handle->dev->ops;
1185 ret = ops->get_rss_key_size(priv->ae_handle); 1186 return ops->get_rss_key_size(priv->ae_handle);
1186
1187 return ret;
1188} 1187}
1189 1188
1190static u32 1189static u32
@@ -1192,18 +1191,15 @@ hns_get_rss_indir_size(struct net_device *netdev)
1192{ 1191{
1193 struct hns_nic_priv *priv = netdev_priv(netdev); 1192 struct hns_nic_priv *priv = netdev_priv(netdev);
1194 struct hnae_ae_ops *ops; 1193 struct hnae_ae_ops *ops;
1195 u32 ret;
1196 1194
1197 if (AE_IS_VER1(priv->enet_ver)) { 1195 if (AE_IS_VER1(priv->enet_ver)) {
1198 netdev_err(netdev, 1196 netdev_err(netdev,
1199 "RSS feature is not supported on this hardware\n"); 1197 "RSS feature is not supported on this hardware\n");
1200 return -EOPNOTSUPP; 1198 return 0;
1201 } 1199 }
1202 1200
1203 ops = priv->ae_handle->dev->ops; 1201 ops = priv->ae_handle->dev->ops;
1204 ret = ops->get_rss_indir_size(priv->ae_handle); 1202 return ops->get_rss_indir_size(priv->ae_handle);
1205
1206 return ret;
1207} 1203}
1208 1204
1209static int 1205static int
@@ -1211,7 +1207,6 @@ hns_get_rss(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
1211{ 1207{
1212 struct hns_nic_priv *priv = netdev_priv(netdev); 1208 struct hns_nic_priv *priv = netdev_priv(netdev);
1213 struct hnae_ae_ops *ops; 1209 struct hnae_ae_ops *ops;
1214 int ret;
1215 1210
1216 if (AE_IS_VER1(priv->enet_ver)) { 1211 if (AE_IS_VER1(priv->enet_ver)) {
1217 netdev_err(netdev, 1212 netdev_err(netdev,
@@ -1224,9 +1219,7 @@ hns_get_rss(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
1224 if (!indir) 1219 if (!indir)
1225 return 0; 1220 return 0;
1226 1221
1227 ret = ops->get_rss(priv->ae_handle, indir, key, hfunc); 1222 return ops->get_rss(priv->ae_handle, indir, key, hfunc);
1228
1229 return 0;
1230} 1223}
1231 1224
1232static int 1225static int
@@ -1235,7 +1228,6 @@ hns_set_rss(struct net_device *netdev, const u32 *indir, const u8 *key,
1235{ 1228{
1236 struct hns_nic_priv *priv = netdev_priv(netdev); 1229 struct hns_nic_priv *priv = netdev_priv(netdev);
1237 struct hnae_ae_ops *ops; 1230 struct hnae_ae_ops *ops;
1238 int ret;
1239 1231
1240 if (AE_IS_VER1(priv->enet_ver)) { 1232 if (AE_IS_VER1(priv->enet_ver)) {
1241 netdev_err(netdev, 1233 netdev_err(netdev,
@@ -1252,7 +1244,22 @@ hns_set_rss(struct net_device *netdev, const u32 *indir, const u8 *key,
1252 if (!indir) 1244 if (!indir)
1253 return 0; 1245 return 0;
1254 1246
1255 ret = ops->set_rss(priv->ae_handle, indir, key, hfunc); 1247 return ops->set_rss(priv->ae_handle, indir, key, hfunc);
1248}
1249
1250static int hns_get_rxnfc(struct net_device *netdev,
1251 struct ethtool_rxnfc *cmd,
1252 u32 *rule_locs)
1253{
1254 struct hns_nic_priv *priv = netdev_priv(netdev);
1255
1256 switch (cmd->cmd) {
1257 case ETHTOOL_GRXRINGS:
1258 cmd->data = priv->ae_handle->q_num;
1259 break;
1260 default:
1261 return -EOPNOTSUPP;
1262 }
1256 1263
1257 return 0; 1264 return 0;
1258} 1265}
@@ -1280,6 +1287,7 @@ static struct ethtool_ops hns_ethtool_ops = {
1280 .get_rxfh_indir_size = hns_get_rss_indir_size, 1287 .get_rxfh_indir_size = hns_get_rss_indir_size,
1281 .get_rxfh = hns_get_rss, 1288 .get_rxfh = hns_get_rss,
1282 .set_rxfh = hns_set_rss, 1289 .set_rxfh = hns_set_rss,
1290 .get_rxnfc = hns_get_rxnfc,
1283}; 1291};
1284 1292
1285void hns_ethtool_set_ops(struct net_device *ndev) 1293void hns_ethtool_set_ops(struct net_device *ndev)
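
Editor's note: the new .get_rxnfc hook answers only ETHTOOL_GRXRINGS, reporting the ring count that user space (for example ethtool -x) uses to size the RSS indirection table. Below is a simplified sketch with stand-in types for the ethtool structures; the ETHTOOL_GRXRINGS value is quoted from the uapi ethtool header, everything else is illustrative.

#include <stdint.h>
#include <stdio.h>

#define ETHTOOL_GRXRINGS 0x2d		/* value from the uapi ethtool header */
#define EOPNOTSUPP	 95

struct rxnfc_query {			/* stand-in for struct ethtool_rxnfc */
	uint32_t cmd;
	uint64_t data;
};

static int get_rxnfc(struct rxnfc_query *q, uint32_t q_num)
{
	switch (q->cmd) {
	case ETHTOOL_GRXRINGS:
		q->data = q_num;	/* report the number of rx rings */
		return 0;
	default:
		return -EOPNOTSUPP;	/* everything else is unsupported */
	}
}

int main(void)
{
	struct rxnfc_query q = { .cmd = ETHTOOL_GRXRINGS };

	printf("ret=%d rings=%llu\n", get_rxnfc(&q, 16),
	       (unsigned long long)q.data);
	return 0;
}
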
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 6e9e16eee5d0..864cb21351a4 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -61,6 +61,7 @@
61#include <linux/proc_fs.h> 61#include <linux/proc_fs.h>
62#include <linux/in.h> 62#include <linux/in.h>
63#include <linux/ip.h> 63#include <linux/ip.h>
64#include <linux/ipv6.h>
64#include <linux/irq.h> 65#include <linux/irq.h>
65#include <linux/kthread.h> 66#include <linux/kthread.h>
66#include <linux/seq_file.h> 67#include <linux/seq_file.h>
@@ -94,6 +95,7 @@ static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
94static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *); 95static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
95static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle, 96static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
96 union sub_crq *sub_crq); 97 union sub_crq *sub_crq);
98static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
97static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance); 99static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
98static int enable_scrq_irq(struct ibmvnic_adapter *, 100static int enable_scrq_irq(struct ibmvnic_adapter *,
99 struct ibmvnic_sub_crq_queue *); 101 struct ibmvnic_sub_crq_queue *);
@@ -561,10 +563,141 @@ static int ibmvnic_close(struct net_device *netdev)
561 return 0; 563 return 0;
562} 564}
563 565
566/**
567 * build_hdr_data - creates L2/L3/L4 header data buffer
568 * @hdr_field - bitfield determining needed headers
569 * @skb - socket buffer
570 * @hdr_len - array of header lengths
571 * @tot_len - total length of data
572 *
573 * Reads hdr_field to determine which headers are needed by firmware.
574 * Builds a buffer containing these headers. Saves individual header
575 * lengths and total buffer length to be used to build descriptors.
576 */
577static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
578 int *hdr_len, u8 *hdr_data)
579{
580 int len = 0;
581 u8 *hdr;
582
583 hdr_len[0] = sizeof(struct ethhdr);
584
585 if (skb->protocol == htons(ETH_P_IP)) {
586 hdr_len[1] = ip_hdr(skb)->ihl * 4;
587 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
588 hdr_len[2] = tcp_hdrlen(skb);
589 else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
590 hdr_len[2] = sizeof(struct udphdr);
591 } else if (skb->protocol == htons(ETH_P_IPV6)) {
592 hdr_len[1] = sizeof(struct ipv6hdr);
593 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
594 hdr_len[2] = tcp_hdrlen(skb);
595 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
596 hdr_len[2] = sizeof(struct udphdr);
597 }
598
599 memset(hdr_data, 0, 120);
600 if ((hdr_field >> 6) & 1) {
601 hdr = skb_mac_header(skb);
602 memcpy(hdr_data, hdr, hdr_len[0]);
603 len += hdr_len[0];
604 }
605
606 if ((hdr_field >> 5) & 1) {
607 hdr = skb_network_header(skb);
608 memcpy(hdr_data + len, hdr, hdr_len[1]);
609 len += hdr_len[1];
610 }
611
612 if ((hdr_field >> 4) & 1) {
613 hdr = skb_transport_header(skb);
614 memcpy(hdr_data + len, hdr, hdr_len[2]);
615 len += hdr_len[2];
616 }
617 return len;
618}
619
620/**
621 * create_hdr_descs - create header and header extension descriptors
622 * @hdr_field - bitfield determining needed headers
623 * @data - buffer containing header data
624 * @len - length of data buffer
625 * @hdr_len - array of individual header lengths
626 * @scrq_arr - descriptor array
627 *
628 * Creates header and, if needed, header extension descriptors and
629 * places them in a descriptor array, scrq_arr
630 */
631
632static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
633 union sub_crq *scrq_arr)
634{
635 union sub_crq hdr_desc;
636 int tmp_len = len;
637 u8 *data, *cur;
638 int tmp;
639
640 while (tmp_len > 0) {
641 cur = hdr_data + len - tmp_len;
642
643 memset(&hdr_desc, 0, sizeof(hdr_desc));
644 if (cur != hdr_data) {
645 data = hdr_desc.hdr_ext.data;
646 tmp = tmp_len > 29 ? 29 : tmp_len;
647 hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
648 hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
649 hdr_desc.hdr_ext.len = tmp;
650 } else {
651 data = hdr_desc.hdr.data;
652 tmp = tmp_len > 24 ? 24 : tmp_len;
653 hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
654 hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
655 hdr_desc.hdr.len = tmp;
656 hdr_desc.hdr.l2_len = (u8)hdr_len[0];
657 hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
658 hdr_desc.hdr.l4_len = (u8)hdr_len[2];
659 hdr_desc.hdr.flag = hdr_field << 1;
660 }
661 memcpy(data, cur, tmp);
662 tmp_len -= tmp;
663 *scrq_arr = hdr_desc;
664 scrq_arr++;
665 }
666}
667
668/**
669 * build_hdr_descs_arr - build a header descriptor array
670 * @skb - socket buffer
671 * @num_entries - number of descriptors to be sent
672 * @subcrq - first TX descriptor
673 * @hdr_field - bit field determining which headers will be sent
674 *
675 * This function will build a TX descriptor array with applicable
676 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
677 */
678
679static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
680 int *num_entries, u8 hdr_field)
681{
682 int hdr_len[3] = {0, 0, 0};
683 int tot_len, len;
684 u8 *hdr_data = txbuff->hdr_data;
685
686 tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
687 txbuff->hdr_data);
688 len = tot_len;
689 len -= 24;
690 if (len > 0)
 691 *num_entries += len % 29 ? len / 29 + 1 : len / 29;
692 create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
693 txbuff->indir_arr + 1);
694}
695
564static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) 696static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
565{ 697{
566 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 698 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
567 int queue_num = skb_get_queue_mapping(skb); 699 int queue_num = skb_get_queue_mapping(skb);
700 u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
568 struct device *dev = &adapter->vdev->dev; 701 struct device *dev = &adapter->vdev->dev;
569 struct ibmvnic_tx_buff *tx_buff = NULL; 702 struct ibmvnic_tx_buff *tx_buff = NULL;
570 struct ibmvnic_tx_pool *tx_pool; 703 struct ibmvnic_tx_pool *tx_pool;
@@ -579,6 +712,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
579 unsigned long lpar_rc; 712 unsigned long lpar_rc;
580 union sub_crq tx_crq; 713 union sub_crq tx_crq;
581 unsigned int offset; 714 unsigned int offset;
715 int num_entries = 1;
582 unsigned char *dst; 716 unsigned char *dst;
583 u64 *handle_array; 717 u64 *handle_array;
584 int index = 0; 718 int index = 0;
@@ -644,11 +778,35 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
644 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP; 778 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
645 } 779 }
646 780
647 if (skb->ip_summed == CHECKSUM_PARTIAL) 781 if (skb->ip_summed == CHECKSUM_PARTIAL) {
648 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD; 782 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
649 783 hdrs += 2;
650 lpar_rc = send_subcrq(adapter, handle_array[0], &tx_crq); 784 }
651 785 /* determine if l2/3/4 headers are sent to firmware */
786 if ((*hdrs >> 7) & 1 &&
787 (skb->protocol == htons(ETH_P_IP) ||
788 skb->protocol == htons(ETH_P_IPV6))) {
789 build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
790 tx_crq.v1.n_crq_elem = num_entries;
791 tx_buff->indir_arr[0] = tx_crq;
792 tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
793 sizeof(tx_buff->indir_arr),
794 DMA_TO_DEVICE);
795 if (dma_mapping_error(dev, tx_buff->indir_dma)) {
796 if (!firmware_has_feature(FW_FEATURE_CMO))
797 dev_err(dev, "tx: unable to map descriptor array\n");
798 tx_map_failed++;
799 tx_dropped++;
800 ret = NETDEV_TX_BUSY;
801 goto out;
802 }
803 lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
804 (u64)tx_buff->indir_dma,
805 (u64)num_entries);
806 } else {
807 lpar_rc = send_subcrq(adapter, handle_array[queue_num],
808 &tx_crq);
809 }
652 if (lpar_rc != H_SUCCESS) { 810 if (lpar_rc != H_SUCCESS) {
653 dev_err(dev, "tx failed with code %ld\n", lpar_rc); 811 dev_err(dev, "tx failed with code %ld\n", lpar_rc);
654 812
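
create_hdr_descs() above packs at most 24 bytes of header data into the leading IBMVNIC_HDR_DESC and up to 29 bytes into each following IBMVNIC_HDR_EXT_DESC, so the number of header descriptors is one plus a ceiling division of the remainder. A standalone sketch of that packing arithmetic (the helper name and the sample header lengths are illustrative, not taken from the driver):

#include <stdio.h>

#define HDR_DESC_DATA   24  /* payload bytes in the first header descriptor */
#define HDR_EXT_DATA    29  /* payload bytes in each extension descriptor */

/* Number of descriptors needed to carry tot_len bytes of header data */
static int hdr_desc_count(int tot_len)
{
    int rest = tot_len - HDR_DESC_DATA;

    if (rest <= 0)
        return 1;
    return 1 + (rest + HDR_EXT_DATA - 1) / HDR_EXT_DATA;
}

int main(void)
{
    /* e.g. 14-byte Ethernet + 20-byte IPv4 + 20-byte TCP header = 54 bytes */
    printf("%d descriptors\n", hdr_desc_count(14 + 20 + 20));  /* prints 3 */
    return 0;
}
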
@@ -832,7 +990,7 @@ restart_poll:
832 netdev->stats.rx_bytes += length; 990 netdev->stats.rx_bytes += length;
833 frames_processed++; 991 frames_processed++;
834 } 992 }
835 replenish_pools(adapter); 993 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
836 994
837 if (frames_processed < budget) { 995 if (frames_processed < budget) {
838 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]); 996 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
@@ -1159,6 +1317,7 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
1159 union sub_crq *next; 1317 union sub_crq *next;
1160 int index; 1318 int index;
1161 int i, j; 1319 int i, j;
1320 u8 first;
1162 1321
1163restart_loop: 1322restart_loop:
1164 while (pending_scrq(adapter, scrq)) { 1323 while (pending_scrq(adapter, scrq)) {
@@ -1181,6 +1340,13 @@ restart_loop:
1181 txbuff->data_dma[j] = 0; 1340 txbuff->data_dma[j] = 0;
1182 txbuff->used_bounce = false; 1341 txbuff->used_bounce = false;
1183 } 1342 }
1343 /* if sub_crq was sent indirectly */
1344 first = txbuff->indir_arr[0].generic.first;
1345 if (first == IBMVNIC_CRQ_CMD) {
1346 dma_unmap_single(dev, txbuff->indir_dma,
1347 sizeof(txbuff->indir_arr),
1348 DMA_TO_DEVICE);
1349 }
1184 1350
1185 if (txbuff->last_frag) 1351 if (txbuff->last_frag)
1186 dev_kfree_skb_any(txbuff->skb); 1352 dev_kfree_skb_any(txbuff->skb);
@@ -1261,9 +1427,9 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
1261 entries_page : adapter->max_rx_add_entries_per_subcrq; 1427 entries_page : adapter->max_rx_add_entries_per_subcrq;
1262 1428
1263 /* Choosing the maximum number of queues supported by firmware*/ 1429 /* Choosing the maximum number of queues supported by firmware*/
1264 adapter->req_tx_queues = adapter->min_tx_queues; 1430 adapter->req_tx_queues = adapter->max_tx_queues;
1265 adapter->req_rx_queues = adapter->min_rx_queues; 1431 adapter->req_rx_queues = adapter->max_rx_queues;
1266 adapter->req_rx_add_queues = adapter->min_rx_add_queues; 1432 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
1267 1433
1268 adapter->req_mtu = adapter->max_mtu; 1434 adapter->req_mtu = adapter->max_mtu;
1269 } 1435 }
@@ -1494,6 +1660,28 @@ static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
1494 return rc; 1660 return rc;
1495} 1661}
1496 1662
1663static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
1664 u64 remote_handle, u64 ioba, u64 num_entries)
1665{
1666 unsigned int ua = adapter->vdev->unit_address;
1667 struct device *dev = &adapter->vdev->dev;
1668 int rc;
1669
1670 /* Make sure the hypervisor sees the complete request */
1671 mb();
1672 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
1673 cpu_to_be64(remote_handle),
1674 ioba, num_entries);
1675
1676 if (rc) {
1677 if (rc == H_CLOSED)
1678 dev_warn(dev, "CRQ Queue closed\n");
1679 dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
1680 }
1681
1682 return rc;
1683}
1684
1497static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter, 1685static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
1498 union ibmvnic_crq *crq) 1686 union ibmvnic_crq *crq)
1499{ 1687{
@@ -1589,13 +1777,11 @@ static void send_login(struct ibmvnic_adapter *adapter)
1589 goto buf_map_failed; 1777 goto buf_map_failed;
1590 } 1778 }
1591 1779
1592 rsp_buffer_size = 1780 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
1593 sizeof(struct ibmvnic_login_rsp_buffer) + 1781 sizeof(u64) * adapter->req_tx_queues +
1594 sizeof(u64) * (adapter->req_tx_queues + 1782 sizeof(u64) * adapter->req_rx_queues +
1595 adapter->req_rx_queues * 1783 sizeof(u64) * adapter->req_rx_queues +
1596 adapter->req_rx_add_queues + adapter-> 1784 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
1597 req_rx_add_queues) +
1598 sizeof(u8) * (IBMVNIC_TX_DESC_VERSIONS);
1599 1785
1600 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC); 1786 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
1601 if (!login_rsp_buffer) 1787 if (!login_rsp_buffer)
@@ -1918,6 +2104,10 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
1918 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum) 2104 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
1919 adapter->netdev->features |= NETIF_F_IPV6_CSUM; 2105 adapter->netdev->features |= NETIF_F_IPV6_CSUM;
1920 2106
2107 if ((adapter->netdev->features &
2108 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
2109 adapter->netdev->features |= NETIF_F_RXCSUM;
2110
1921 memset(&crq, 0, sizeof(crq)); 2111 memset(&crq, 0, sizeof(crq));
1922 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD; 2112 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
1923 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD; 2113 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
@@ -2210,6 +2400,16 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
2210 dma_unmap_single(dev, adapter->login_rsp_buf_token, 2400 dma_unmap_single(dev, adapter->login_rsp_buf_token,
2211 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL); 2401 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);
2212 2402
2403 /* If the number of queues requested can't be allocated by the
2404 * server, the login response will return with code 1. We will need
2405 * to resend the login buffer with fewer queues requested.
2406 */
2407 if (login_rsp_crq->generic.rc.code) {
2408 adapter->renegotiate = true;
2409 complete(&adapter->init_done);
2410 return 0;
2411 }
2412
2213 netdev_dbg(adapter->netdev, "Login Response Buffer:\n"); 2413 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
2214 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) { 2414 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
2215 netdev_dbg(adapter->netdev, "%016lx\n", 2415 netdev_dbg(adapter->netdev, "%016lx\n",
@@ -3437,14 +3637,21 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
3437 init_completion(&adapter->init_done); 3637 init_completion(&adapter->init_done);
3438 wait_for_completion(&adapter->init_done); 3638 wait_for_completion(&adapter->init_done);
3439 3639
3440 /* needed to pull init_sub_crqs outside of an interrupt context 3640 do {
3441 * because it creates IRQ mappings for the subCRQ queues, causing 3641 adapter->renegotiate = false;
3442 * a kernel warning
3443 */
3444 init_sub_crqs(adapter, 0);
3445 3642
3446 reinit_completion(&adapter->init_done); 3643 init_sub_crqs(adapter, 0);
3447 wait_for_completion(&adapter->init_done); 3644 reinit_completion(&adapter->init_done);
3645 wait_for_completion(&adapter->init_done);
3646
3647 if (adapter->renegotiate) {
3648 release_sub_crqs(adapter);
3649 send_cap_queries(adapter);
3650
3651 reinit_completion(&adapter->init_done);
3652 wait_for_completion(&adapter->init_done);
3653 }
3654 } while (adapter->renegotiate);
3448 3655
3449 /* if init_sub_crqs is partially successful, retry */ 3656 /* if init_sub_crqs is partially successful, retry */
3450 while (!adapter->tx_scrq || !adapter->rx_scrq) { 3657 while (!adapter->tx_scrq || !adapter->rx_scrq) {
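
The probe path now loops until the server accepts the requested capabilities: if the login response carries a non-zero return code, the sub-CRQs are released, capabilities are re-queried and the login is retried. A toy userspace model of that control flow, with the acceptance rule and back-off policy invented purely for illustration:

#include <stdio.h>

/* Stand-in for the server-side check; the real decision comes from firmware */
static int server_accepts(int queues)
{
    return queues <= 4;
}

int main(void)
{
    int req_queues = 16;
    int renegotiate;

    do {
        renegotiate = !server_accepts(req_queues);
        if (renegotiate) {
            req_queues /= 2;  /* back off and retry with fewer queues */
            printf("login rejected, retrying with %d queues\n", req_queues);
        }
    } while (renegotiate);

    printf("login accepted with %d queues\n", req_queues);
    return 0;
}
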
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 1a9993cc79b5..0b66a506a4e4 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -879,6 +879,9 @@ struct ibmvnic_tx_buff {
879 int pool_index; 879 int pool_index;
880 bool last_frag; 880 bool last_frag;
881 bool used_bounce; 881 bool used_bounce;
882 union sub_crq indir_arr[6];
883 u8 hdr_data[140];
884 dma_addr_t indir_dma;
882}; 885};
883 886
884struct ibmvnic_tx_pool { 887struct ibmvnic_tx_pool {
@@ -977,6 +980,7 @@ struct ibmvnic_adapter {
977 struct ibmvnic_sub_crq_queue **tx_scrq; 980 struct ibmvnic_sub_crq_queue **tx_scrq;
978 struct ibmvnic_sub_crq_queue **rx_scrq; 981 struct ibmvnic_sub_crq_queue **rx_scrq;
979 int requested_caps; 982 int requested_caps;
983 bool renegotiate;
980 984
981 /* rx structs */ 985 /* rx structs */
982 struct napi_struct *napi; 986 struct napi_struct *napi;
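
The new ibmvnic_tx_buff fields are at least consistent with the 24/29-byte packing used by create_hdr_descs(): indir_arr[6] holds the transmit descriptor plus up to five header descriptors, and five header descriptors (one main, four extensions) carry 24 + 4 * 29 = 140 bytes, matching hdr_data[140]. A compile-time sketch of that bound, assuming the conventional worst-case header sizes (14-byte untagged Ethernet, 60-byte IPv4, 60-byte TCP):

#include <assert.h>

#define ETH_MAX_HDR     14  /* untagged Ethernet header */
#define IPV4_MAX_HDR    60  /* largest header an ihl of 15 can describe */
#define TCP_MAX_HDR     60  /* largest header a data offset of 15 can describe */

#define HDR_DESC_DATA   24  /* first header descriptor payload */
#define HDR_EXT_DATA    29  /* each extension descriptor payload */

/* One IBMVNIC_HDR_DESC plus four IBMVNIC_HDR_EXT_DESC entries */
#define MAX_HDR_BYTES   (HDR_DESC_DATA + 4 * HDR_EXT_DATA)

static_assert(MAX_HDR_BYTES == 140, "matches hdr_data[140]");
static_assert(ETH_MAX_HDR + IPV4_MAX_HDR + TCP_MAX_HDR <= MAX_HDR_BYTES,
              "worst-case L2+L3+L4 headers fit the buffer");

int main(void)
{
    return 0;
}
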
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 3772f3ac956e..714bd1014ddb 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -25,16 +25,13 @@ config E100
25 on the adapter. Look for a label that has a barcode and a number 25 on the adapter. Look for a label that has a barcode and a number
26 in the format 123456-001 (six digits hyphen three digits). 26 in the format 123456-001 (six digits hyphen three digits).
27 27
28 Use the above information and the Adapter & Driver ID Guide at: 28 Use the above information and the Adapter & Driver ID Guide that
29 can be located at:
29 30
30 <http://support.intel.com/support/network/adapter/pro100/21397.htm> 31 <http://support.intel.com>
31 32
32 to identify the adapter. 33 to identify the adapter.
33 34
34 For the latest Intel PRO/100 network driver for Linux, see:
35
36 <http://www.intel.com/p/en_US/support/highlights/network/pro100plus>
37
38 More specific information on configuring the driver is in 35 More specific information on configuring the driver is in
39 <file:Documentation/networking/e100.txt>. 36 <file:Documentation/networking/e100.txt>.
40 37
@@ -47,12 +44,7 @@ config E1000
47 ---help--- 44 ---help---
48 This driver supports Intel(R) PRO/1000 gigabit ethernet family of 45 This driver supports Intel(R) PRO/1000 gigabit ethernet family of
49 adapters. For more information on how to identify your adapter, go 46 adapters. For more information on how to identify your adapter, go
50 to the Adapter & Driver ID Guide at: 47 to the Adapter & Driver ID Guide that can be located at:
51
52 <http://support.intel.com/support/network/adapter/pro100/21397.htm>
53
54 For general information and support, go to the Intel support
55 website at:
56 48
57 <http://support.intel.com> 49 <http://support.intel.com>
58 50
@@ -71,12 +63,8 @@ config E1000E
71 This driver supports the PCI-Express Intel(R) PRO/1000 gigabit 63 This driver supports the PCI-Express Intel(R) PRO/1000 gigabit
72 ethernet family of adapters. For PCI or PCI-X e1000 adapters, 64 ethernet family of adapters. For PCI or PCI-X e1000 adapters,
73 use the regular e1000 driver For more information on how to 65 use the regular e1000 driver For more information on how to
74 identify your adapter, go to the Adapter & Driver ID Guide at: 66 identify your adapter, go to the Adapter & Driver ID Guide that
75 67 can be located at:
76 <http://support.intel.com/support/network/adapter/pro100/21397.htm>
77
78 For general information and support, go to the Intel support
79 website at:
80 68
81 <http://support.intel.com> 69 <http://support.intel.com>
82 70
@@ -101,12 +89,7 @@ config IGB
101 ---help--- 89 ---help---
102 This driver supports Intel(R) 82575/82576 gigabit ethernet family of 90 This driver supports Intel(R) 82575/82576 gigabit ethernet family of
103 adapters. For more information on how to identify your adapter, go 91 adapters. For more information on how to identify your adapter, go
104 to the Adapter & Driver ID Guide at: 92 to the Adapter & Driver ID Guide that can be located at:
105
106 <http://support.intel.com/support/network/adapter/pro100/21397.htm>
107
108 For general information and support, go to the Intel support
109 website at:
110 93
111 <http://support.intel.com> 94 <http://support.intel.com>
112 95
@@ -142,12 +125,7 @@ config IGBVF
142 ---help--- 125 ---help---
143 This driver supports Intel(R) 82576 virtual functions. For more 126 This driver supports Intel(R) 82576 virtual functions. For more
144 information on how to identify your adapter, go to the Adapter & 127 information on how to identify your adapter, go to the Adapter &
145 Driver ID Guide at: 128 Driver ID Guide that can be located at:
146
147 <http://support.intel.com/support/network/adapter/pro100/21397.htm>
148
149 For general information and support, go to the Intel support
150 website at:
151 129
152 <http://support.intel.com> 130 <http://support.intel.com>
153 131
@@ -164,12 +142,7 @@ config IXGB
164 This driver supports Intel(R) PRO/10GbE family of adapters for 142 This driver supports Intel(R) PRO/10GbE family of adapters for
165 PCI-X type cards. For PCI-E type cards, use the "ixgbe" driver 143 PCI-X type cards. For PCI-E type cards, use the "ixgbe" driver
166 instead. For more information on how to identify your adapter, go 144 instead. For more information on how to identify your adapter, go
167 to the Adapter & Driver ID Guide at: 145 to the Adapter & Driver ID Guide that can be located at:
168
169 <http://support.intel.com/support/network/adapter/pro100/21397.htm>
170
171 For general information and support, go to the Intel support
172 website at:
173 146
174 <http://support.intel.com> 147 <http://support.intel.com>
175 148
@@ -187,12 +160,7 @@ config IXGBE
187 ---help--- 160 ---help---
188 This driver supports Intel(R) 10GbE PCI Express family of 161 This driver supports Intel(R) 10GbE PCI Express family of
189 adapters. For more information on how to identify your adapter, go 162 adapters. For more information on how to identify your adapter, go
190 to the Adapter & Driver ID Guide at: 163 to the Adapter & Driver ID Guide that can be located at:
191
192 <http://support.intel.com/support/network/adapter/pro100/21397.htm>
193
194 For general information and support, go to the Intel support
195 website at:
196 164
197 <http://support.intel.com> 165 <http://support.intel.com>
198 166
@@ -243,12 +211,7 @@ config IXGBEVF
243 ---help--- 211 ---help---
244 This driver supports Intel(R) PCI Express virtual functions for the 212 This driver supports Intel(R) PCI Express virtual functions for the
245 Intel(R) ixgbe driver. For more information on how to identify your 213 Intel(R) ixgbe driver. For more information on how to identify your
246 adapter, go to the Adapter & Driver ID Guide at: 214 adapter, go to the Adapter & Driver ID Guide that can be located at:
247
248 <http://support.intel.com/support/network/sb/CS-008441.htm>
249
250 For general information and support, go to the Intel support
251 website at:
252 215
253 <http://support.intel.com> 216 <http://support.intel.com>
254 217
@@ -266,12 +229,7 @@ config I40E
266 ---help--- 229 ---help---
267 This driver supports Intel(R) Ethernet Controller XL710 Family of 230 This driver supports Intel(R) Ethernet Controller XL710 Family of
268 devices. For more information on how to identify your adapter, go 231 devices. For more information on how to identify your adapter, go
269 to the Adapter & Driver ID Guide at: 232 to the Adapter & Driver ID Guide that can be located at:
270
271 <http://support.intel.com/support/network/adapter/pro100/21397.htm>
272
273 For general information and support, go to the Intel support
274 website at:
275 233
276 <http://support.intel.com> 234 <http://support.intel.com>
277 235
@@ -326,12 +284,7 @@ config I40EVF
326 ---help--- 284 ---help---
327 This driver supports Intel(R) XL710 and X710 virtual functions. 285 This driver supports Intel(R) XL710 and X710 virtual functions.
328 For more information on how to identify your adapter, go to the 286 For more information on how to identify your adapter, go to the
329 Adapter & Driver ID Guide at: 287 Adapter & Driver ID Guide that can be located at:
330
331 <http://support.intel.com/support/network/sb/CS-008441.htm>
332
333 For general information and support, go to the Intel support
334 website at:
335 288
336 <http://support.intel.com> 289 <http://support.intel.com>
337 290
@@ -347,12 +300,7 @@ config FM10K
347 ---help--- 300 ---help---
348 This driver supports Intel(R) FM10000 Ethernet Switch Host 301 This driver supports Intel(R) FM10000 Ethernet Switch Host
349 Interface. For more information on how to identify your adapter, 302 Interface. For more information on how to identify your adapter,
350 go to the Adapter & Driver ID Guide at: 303 go to the Adapter & Driver ID Guide that can be located at:
351
352 <http://support.intel.com/support/network/sb/CS-008441.htm>
353
354 For general information and support, go to the Intel support
355 website at:
356 304
357 <http://support.intel.com> 305 <http://support.intel.com>
358 306
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index 98fe5a2cd6e3..d7bdea79e9fa 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -358,6 +358,8 @@ struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
358extern char e1000_driver_name[]; 358extern char e1000_driver_name[];
359extern const char e1000_driver_version[]; 359extern const char e1000_driver_version[];
360 360
361int e1000_open(struct net_device *netdev);
362int e1000_close(struct net_device *netdev);
361int e1000_up(struct e1000_adapter *adapter); 363int e1000_up(struct e1000_adapter *adapter);
362void e1000_down(struct e1000_adapter *adapter); 364void e1000_down(struct e1000_adapter *adapter);
363void e1000_reinit_locked(struct e1000_adapter *adapter); 365void e1000_reinit_locked(struct e1000_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 83e557c7f279..975eeb885ca2 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1553,7 +1553,7 @@ static void e1000_diag_test(struct net_device *netdev,
1553 1553
1554 if (if_running) 1554 if (if_running)
1555 /* indicate we're in test mode */ 1555 /* indicate we're in test mode */
1556 dev_close(netdev); 1556 e1000_close(netdev);
1557 else 1557 else
1558 e1000_reset(adapter); 1558 e1000_reset(adapter);
1559 1559
@@ -1582,7 +1582,7 @@ static void e1000_diag_test(struct net_device *netdev,
1582 e1000_reset(adapter); 1582 e1000_reset(adapter);
1583 clear_bit(__E1000_TESTING, &adapter->flags); 1583 clear_bit(__E1000_TESTING, &adapter->flags);
1584 if (if_running) 1584 if (if_running)
1585 dev_open(netdev); 1585 e1000_open(netdev);
1586 } else { 1586 } else {
1587 e_info(hw, "online testing starting\n"); 1587 e_info(hw, "online testing starting\n");
1588 /* Online tests */ 1588 /* Online tests */
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 3fc7bde699ba..f42129d09e2c 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -114,8 +114,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
114static void e1000_remove(struct pci_dev *pdev); 114static void e1000_remove(struct pci_dev *pdev);
115static int e1000_alloc_queues(struct e1000_adapter *adapter); 115static int e1000_alloc_queues(struct e1000_adapter *adapter);
116static int e1000_sw_init(struct e1000_adapter *adapter); 116static int e1000_sw_init(struct e1000_adapter *adapter);
117static int e1000_open(struct net_device *netdev); 117int e1000_open(struct net_device *netdev);
118static int e1000_close(struct net_device *netdev); 118int e1000_close(struct net_device *netdev);
119static void e1000_configure_tx(struct e1000_adapter *adapter); 119static void e1000_configure_tx(struct e1000_adapter *adapter);
120static void e1000_configure_rx(struct e1000_adapter *adapter); 120static void e1000_configure_rx(struct e1000_adapter *adapter);
121static void e1000_setup_rctl(struct e1000_adapter *adapter); 121static void e1000_setup_rctl(struct e1000_adapter *adapter);
@@ -1360,7 +1360,7 @@ static int e1000_alloc_queues(struct e1000_adapter *adapter)
1360 * handler is registered with the OS, the watchdog task is started, 1360 * handler is registered with the OS, the watchdog task is started,
1361 * and the stack is notified that the interface is ready. 1361 * and the stack is notified that the interface is ready.
1362 **/ 1362 **/
1363static int e1000_open(struct net_device *netdev) 1363int e1000_open(struct net_device *netdev)
1364{ 1364{
1365 struct e1000_adapter *adapter = netdev_priv(netdev); 1365 struct e1000_adapter *adapter = netdev_priv(netdev);
1366 struct e1000_hw *hw = &adapter->hw; 1366 struct e1000_hw *hw = &adapter->hw;
@@ -1437,7 +1437,7 @@ err_setup_tx:
1437 * needs to be disabled. A global MAC reset is issued to stop the 1437 * needs to be disabled. A global MAC reset is issued to stop the
1438 * hardware, and all transmit and receive resources are freed. 1438 * hardware, and all transmit and receive resources are freed.
1439 **/ 1439 **/
1440static int e1000_close(struct net_device *netdev) 1440int e1000_close(struct net_device *netdev)
1441{ 1441{
1442 struct e1000_adapter *adapter = netdev_priv(netdev); 1442 struct e1000_adapter *adapter = netdev_priv(netdev);
1443 struct e1000_hw *hw = &adapter->hw; 1443 struct e1000_hw *hw = &adapter->hw;
@@ -3106,7 +3106,7 @@ static int e1000_maybe_stop_tx(struct net_device *netdev,
3106 return __e1000_maybe_stop_tx(netdev, size); 3106 return __e1000_maybe_stop_tx(netdev, size);
3107} 3107}
3108 3108
3109#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1) 3109#define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X))
3110static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, 3110static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3111 struct net_device *netdev) 3111 struct net_device *netdev)
3112{ 3112{
@@ -3256,12 +3256,29 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3256 nr_frags, mss); 3256 nr_frags, mss);
3257 3257
3258 if (count) { 3258 if (count) {
3259 /* The descriptors needed is higher than other Intel drivers
3260 * due to a number of workarounds. The breakdown is below:
3261 * Data descriptors: MAX_SKB_FRAGS + 1
3262 * Context Descriptor: 1
3263 * Keep head from touching tail: 2
3264 * Workarounds: 3
3265 */
3266 int desc_needed = MAX_SKB_FRAGS + 7;
3267
3259 netdev_sent_queue(netdev, skb->len); 3268 netdev_sent_queue(netdev, skb->len);
3260 skb_tx_timestamp(skb); 3269 skb_tx_timestamp(skb);
3261 3270
3262 e1000_tx_queue(adapter, tx_ring, tx_flags, count); 3271 e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3272
3273 /* 82544 potentially requires twice as many data descriptors
3274 * in order to guarantee buffers don't end on evenly-aligned
3275 * dwords
3276 */
3277 if (adapter->pcix_82544)
3278 desc_needed += MAX_SKB_FRAGS + 1;
3279
3263 /* Make sure there is space in the ring for the next send. */ 3280 /* Make sure there is space in the ring for the next send. */
3264 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2); 3281 e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);
3265 3282
3266 if (!skb->xmit_more || 3283 if (!skb->xmit_more ||
3267 netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) { 3284 netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
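
The TXD_USE_COUNT change replaces a count that unconditionally added one descriptor with a true ceiling division by the per-descriptor size (1 << X), so lengths that are exact multiples no longer reserve an extra descriptor. A small self-contained comparison (the shift of 12 mirrors the driver's 4 KB per-descriptor data limit; the sample lengths are arbitrary):

#include <stdio.h>

#define TXD_USE_COUNT_OLD(S, X)  (((S) >> (X)) + 1)
#define TXD_USE_COUNT_NEW(S, X)  (((S) + ((1 << (X)) - 1)) >> (X))

int main(void)
{
    unsigned int sizes[] = { 100, 4096, 4097, 8192 };
    unsigned int i;

    for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
        printf("len %5u -> old %u, new %u descriptors\n", sizes[i],
               TXD_USE_COUNT_OLD(sizes[i], 12),
               TXD_USE_COUNT_NEW(sizes[i], 12));
    return 0;
}
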
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 1dc293bad87b..52eb641fc9dc 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -480,6 +480,8 @@ extern const char e1000e_driver_version[];
480void e1000e_check_options(struct e1000_adapter *adapter); 480void e1000e_check_options(struct e1000_adapter *adapter);
481void e1000e_set_ethtool_ops(struct net_device *netdev); 481void e1000e_set_ethtool_ops(struct net_device *netdev);
482 482
483int e1000e_open(struct net_device *netdev);
484int e1000e_close(struct net_device *netdev);
483void e1000e_up(struct e1000_adapter *adapter); 485void e1000e_up(struct e1000_adapter *adapter);
484void e1000e_down(struct e1000_adapter *adapter, bool reset); 486void e1000e_down(struct e1000_adapter *adapter, bool reset);
485void e1000e_reinit_locked(struct e1000_adapter *adapter); 487void e1000e_reinit_locked(struct e1000_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 6cab1f30d41e..1e3973aa707c 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -1816,7 +1816,7 @@ static void e1000_diag_test(struct net_device *netdev,
1816 1816
1817 if (if_running) 1817 if (if_running)
1818 /* indicate we're in test mode */ 1818 /* indicate we're in test mode */
1819 dev_close(netdev); 1819 e1000e_close(netdev);
1820 1820
1821 if (e1000_reg_test(adapter, &data[0])) 1821 if (e1000_reg_test(adapter, &data[0]))
1822 eth_test->flags |= ETH_TEST_FL_FAILED; 1822 eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1849,7 +1849,7 @@ static void e1000_diag_test(struct net_device *netdev,
1849 1849
1850 clear_bit(__E1000_TESTING, &adapter->state); 1850 clear_bit(__E1000_TESTING, &adapter->state);
1851 if (if_running) 1851 if (if_running)
1852 dev_open(netdev); 1852 e1000e_open(netdev);
1853 } else { 1853 } else {
1854 /* Online tests */ 1854 /* Online tests */
1855 1855
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 9b4ec13d9161..a7f16c35ebcd 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -4495,7 +4495,7 @@ static int e1000_test_msi(struct e1000_adapter *adapter)
4495} 4495}
4496 4496
4497/** 4497/**
4498 * e1000_open - Called when a network interface is made active 4498 * e1000e_open - Called when a network interface is made active
4499 * @netdev: network interface device structure 4499 * @netdev: network interface device structure
4500 * 4500 *
4501 * Returns 0 on success, negative value on failure 4501 * Returns 0 on success, negative value on failure
@@ -4506,7 +4506,7 @@ static int e1000_test_msi(struct e1000_adapter *adapter)
4506 * handler is registered with the OS, the watchdog timer is started, 4506 * handler is registered with the OS, the watchdog timer is started,
4507 * and the stack is notified that the interface is ready. 4507 * and the stack is notified that the interface is ready.
4508 **/ 4508 **/
4509static int e1000_open(struct net_device *netdev) 4509int e1000e_open(struct net_device *netdev)
4510{ 4510{
4511 struct e1000_adapter *adapter = netdev_priv(netdev); 4511 struct e1000_adapter *adapter = netdev_priv(netdev);
4512 struct e1000_hw *hw = &adapter->hw; 4512 struct e1000_hw *hw = &adapter->hw;
@@ -4604,7 +4604,7 @@ err_setup_tx:
4604} 4604}
4605 4605
4606/** 4606/**
4607 * e1000_close - Disables a network interface 4607 * e1000e_close - Disables a network interface
4608 * @netdev: network interface device structure 4608 * @netdev: network interface device structure
4609 * 4609 *
4610 * Returns 0, this is not allowed to fail 4610 * Returns 0, this is not allowed to fail
@@ -4614,7 +4614,7 @@ err_setup_tx:
4614 * needs to be disabled. A global MAC reset is issued to stop the 4614 * needs to be disabled. A global MAC reset is issued to stop the
4615 * hardware, and all transmit and receive resources are freed. 4615 * hardware, and all transmit and receive resources are freed.
4616 **/ 4616 **/
4617static int e1000_close(struct net_device *netdev) 4617int e1000e_close(struct net_device *netdev)
4618{ 4618{
4619 struct e1000_adapter *adapter = netdev_priv(netdev); 4619 struct e1000_adapter *adapter = netdev_priv(netdev);
4620 struct pci_dev *pdev = adapter->pdev; 4620 struct pci_dev *pdev = adapter->pdev;
@@ -6920,8 +6920,8 @@ static int e1000_set_features(struct net_device *netdev,
6920} 6920}
6921 6921
6922static const struct net_device_ops e1000e_netdev_ops = { 6922static const struct net_device_ops e1000e_netdev_ops = {
6923 .ndo_open = e1000_open, 6923 .ndo_open = e1000e_open,
6924 .ndo_stop = e1000_close, 6924 .ndo_stop = e1000e_close,
6925 .ndo_start_xmit = e1000_xmit_frame, 6925 .ndo_start_xmit = e1000_xmit_frame,
6926 .ndo_get_stats64 = e1000e_get_stats64, 6926 .ndo_get_stats64 = e1000e_get_stats64,
6927 .ndo_set_rx_mode = e1000e_set_rx_mode, 6927 .ndo_set_rx_mode = e1000e_set_rx_mode,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index b34bb008b104..9c7fafef7cf6 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -262,12 +262,12 @@ struct fm10k_intfc {
262 unsigned long state; 262 unsigned long state;
263 263
264 u32 flags; 264 u32 flags;
265#define FM10K_FLAG_RESET_REQUESTED (u32)(1 << 0) 265#define FM10K_FLAG_RESET_REQUESTED (u32)(BIT(0))
266#define FM10K_FLAG_RSS_FIELD_IPV4_UDP (u32)(1 << 1) 266#define FM10K_FLAG_RSS_FIELD_IPV4_UDP (u32)(BIT(1))
267#define FM10K_FLAG_RSS_FIELD_IPV6_UDP (u32)(1 << 2) 267#define FM10K_FLAG_RSS_FIELD_IPV6_UDP (u32)(BIT(2))
268#define FM10K_FLAG_RX_TS_ENABLED (u32)(1 << 3) 268#define FM10K_FLAG_RX_TS_ENABLED (u32)(BIT(3))
269#define FM10K_FLAG_SWPRI_CONFIG (u32)(1 << 4) 269#define FM10K_FLAG_SWPRI_CONFIG (u32)(BIT(4))
270#define FM10K_FLAG_DEBUG_STATS (u32)(1 << 5) 270#define FM10K_FLAG_DEBUG_STATS (u32)(BIT(5))
271 int xcast_mode; 271 int xcast_mode;
272 272
273 /* Tx fast path data */ 273 /* Tx fast path data */
@@ -510,6 +510,8 @@ int fm10k_close(struct net_device *netdev);
510 510
511/* Ethtool */ 511/* Ethtool */
512void fm10k_set_ethtool_ops(struct net_device *dev); 512void fm10k_set_ethtool_ops(struct net_device *dev);
513u32 fm10k_get_reta_size(struct net_device *netdev);
514void fm10k_write_reta(struct fm10k_intfc *interface, const u32 *indir);
513 515
514/* IOV */ 516/* IOV */
515s32 fm10k_iov_event(struct fm10k_intfc *interface); 517s32 fm10k_iov_event(struct fm10k_intfc *interface);
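
The flag definitions switch from open-coded (1 << n) to the kernel's BIT() helper, which shifts in unsigned long and therefore stays well-defined even for bit 31, where 1 << 31 overflows a signed int. A minimal userspace check of the equivalence, with BIT() defined locally the same way <linux/bitops.h> defines it:

#include <stdio.h>

/* Same definition as the kernel's BIT() in <linux/bitops.h> */
#define BIT(nr)  (1UL << (nr))

int main(void)
{
    /* e.g. FM10K_FLAG_RSS_FIELD_IPV4_UDP: (1 << 1) and BIT(1) are identical */
    printf("%lu %d\n", BIT(1), 1 << 1);

    /* BIT(31) is evaluated as unsigned long, avoiding the signed overflow
     * that 1 << 31 would trigger on a 32-bit int.
     */
    printf("%lu\n", BIT(31));
    return 0;
}
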
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 2f6a05b57228..a23748777b1b 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -153,57 +153,51 @@ static const char fm10k_prv_flags[FM10K_PRV_FLAG_LEN][ETH_GSTRING_LEN] = {
153 "debug-statistics", 153 "debug-statistics",
154}; 154};
155 155
156static void fm10k_add_stat_strings(char **p, const char *prefix,
157 const struct fm10k_stats stats[],
158 const unsigned int size)
159{
160 unsigned int i;
161
162 for (i = 0; i < size; i++) {
163 snprintf(*p, ETH_GSTRING_LEN, "%s%s",
164 prefix, stats[i].stat_string);
165 *p += ETH_GSTRING_LEN;
166 }
167}
168
156static void fm10k_get_stat_strings(struct net_device *dev, u8 *data) 169static void fm10k_get_stat_strings(struct net_device *dev, u8 *data)
157{ 170{
158 struct fm10k_intfc *interface = netdev_priv(dev); 171 struct fm10k_intfc *interface = netdev_priv(dev);
159 struct fm10k_iov_data *iov_data = interface->iov_data; 172 struct fm10k_iov_data *iov_data = interface->iov_data;
160 char *p = (char *)data; 173 char *p = (char *)data;
161 unsigned int i; 174 unsigned int i;
162 unsigned int j;
163 175
164 for (i = 0; i < FM10K_NETDEV_STATS_LEN; i++) { 176 fm10k_add_stat_strings(&p, "", fm10k_gstrings_net_stats,
165 memcpy(p, fm10k_gstrings_net_stats[i].stat_string, 177 FM10K_NETDEV_STATS_LEN);
166 ETH_GSTRING_LEN);
167 p += ETH_GSTRING_LEN;
168 }
169 178
170 for (i = 0; i < FM10K_GLOBAL_STATS_LEN; i++) { 179 fm10k_add_stat_strings(&p, "", fm10k_gstrings_global_stats,
171 memcpy(p, fm10k_gstrings_global_stats[i].stat_string, 180 FM10K_GLOBAL_STATS_LEN);
172 ETH_GSTRING_LEN);
173 p += ETH_GSTRING_LEN;
174 }
175 181
176 if (interface->flags & FM10K_FLAG_DEBUG_STATS) { 182 if (interface->flags & FM10K_FLAG_DEBUG_STATS)
177 for (i = 0; i < FM10K_DEBUG_STATS_LEN; i++) { 183 fm10k_add_stat_strings(&p, "", fm10k_gstrings_debug_stats,
178 memcpy(p, fm10k_gstrings_debug_stats[i].stat_string, 184 FM10K_DEBUG_STATS_LEN);
179 ETH_GSTRING_LEN);
180 p += ETH_GSTRING_LEN;
181 }
182 }
183 185
184 for (i = 0; i < FM10K_MBX_STATS_LEN; i++) { 186 fm10k_add_stat_strings(&p, "", fm10k_gstrings_mbx_stats,
185 memcpy(p, fm10k_gstrings_mbx_stats[i].stat_string, 187 FM10K_MBX_STATS_LEN);
186 ETH_GSTRING_LEN);
187 p += ETH_GSTRING_LEN;
188 }
189 188
190 if (interface->hw.mac.type != fm10k_mac_vf) { 189 if (interface->hw.mac.type != fm10k_mac_vf)
191 for (i = 0; i < FM10K_PF_STATS_LEN; i++) { 190 fm10k_add_stat_strings(&p, "", fm10k_gstrings_pf_stats,
192 memcpy(p, fm10k_gstrings_pf_stats[i].stat_string, 191 FM10K_PF_STATS_LEN);
193 ETH_GSTRING_LEN);
194 p += ETH_GSTRING_LEN;
195 }
196 }
197 192
198 if ((interface->flags & FM10K_FLAG_DEBUG_STATS) && iov_data) { 193 if ((interface->flags & FM10K_FLAG_DEBUG_STATS) && iov_data) {
199 for (i = 0; i < iov_data->num_vfs; i++) { 194 for (i = 0; i < iov_data->num_vfs; i++) {
200 for (j = 0; j < FM10K_MBX_STATS_LEN; j++) { 195 char prefix[ETH_GSTRING_LEN];
201 snprintf(p, 196
202 ETH_GSTRING_LEN, 197 snprintf(prefix, ETH_GSTRING_LEN, "vf_%u_", i);
203 "vf_%u_%s", i, 198 fm10k_add_stat_strings(&p, prefix,
204 fm10k_gstrings_mbx_stats[j].stat_string); 199 fm10k_gstrings_mbx_stats,
205 p += ETH_GSTRING_LEN; 200 FM10K_MBX_STATS_LEN);
206 }
207 } 201 }
208 } 202 }
209 203
@@ -271,6 +265,41 @@ static int fm10k_get_sset_count(struct net_device *dev, int sset)
271 } 265 }
272} 266}
273 267
268static void fm10k_add_ethtool_stats(u64 **data, void *pointer,
269 const struct fm10k_stats stats[],
270 const unsigned int size)
271{
272 unsigned int i;
273 char *p;
274
275 /* simply skip forward if we were not given a valid pointer */
276 if (!pointer) {
277 *data += size;
278 return;
279 }
280
281 for (i = 0; i < size; i++) {
282 p = (char *)pointer + stats[i].stat_offset;
283
284 switch (stats[i].sizeof_stat) {
285 case sizeof(u64):
286 *((*data)++) = *(u64 *)p;
287 break;
288 case sizeof(u32):
289 *((*data)++) = *(u32 *)p;
290 break;
291 case sizeof(u16):
292 *((*data)++) = *(u16 *)p;
293 break;
294 case sizeof(u8):
295 *((*data)++) = *(u8 *)p;
296 break;
297 default:
298 *((*data)++) = 0;
299 }
300 }
301}
302
274static void fm10k_get_ethtool_stats(struct net_device *netdev, 303static void fm10k_get_ethtool_stats(struct net_device *netdev,
275 struct ethtool_stats __always_unused *stats, 304 struct ethtool_stats __always_unused *stats,
276 u64 *data) 305 u64 *data)
@@ -279,47 +308,29 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev,
279 struct fm10k_intfc *interface = netdev_priv(netdev); 308 struct fm10k_intfc *interface = netdev_priv(netdev);
280 struct fm10k_iov_data *iov_data = interface->iov_data; 309 struct fm10k_iov_data *iov_data = interface->iov_data;
281 struct net_device_stats *net_stats = &netdev->stats; 310 struct net_device_stats *net_stats = &netdev->stats;
282 char *p;
283 int i, j; 311 int i, j;
284 312
285 fm10k_update_stats(interface); 313 fm10k_update_stats(interface);
286 314
287 for (i = 0; i < FM10K_NETDEV_STATS_LEN; i++) { 315 fm10k_add_ethtool_stats(&data, net_stats, fm10k_gstrings_net_stats,
288 p = (char *)net_stats + fm10k_gstrings_net_stats[i].stat_offset; 316 FM10K_NETDEV_STATS_LEN);
289 *(data++) = (fm10k_gstrings_net_stats[i].sizeof_stat ==
290 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
291 }
292 317
293 for (i = 0; i < FM10K_GLOBAL_STATS_LEN; i++) { 318 fm10k_add_ethtool_stats(&data, interface, fm10k_gstrings_global_stats,
294 p = (char *)interface + 319 FM10K_GLOBAL_STATS_LEN);
295 fm10k_gstrings_global_stats[i].stat_offset;
296 *(data++) = (fm10k_gstrings_global_stats[i].sizeof_stat ==
297 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
298 }
299 320
300 if (interface->flags & FM10K_FLAG_DEBUG_STATS) { 321 if (interface->flags & FM10K_FLAG_DEBUG_STATS)
301 for (i = 0; i < FM10K_DEBUG_STATS_LEN; i++) { 322 fm10k_add_ethtool_stats(&data, interface,
302 p = (char *)interface + 323 fm10k_gstrings_debug_stats,
303 fm10k_gstrings_debug_stats[i].stat_offset; 324 FM10K_DEBUG_STATS_LEN);
304 *(data++) = (fm10k_gstrings_debug_stats[i].sizeof_stat ==
305 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
306 }
307 }
308 325
309 for (i = 0; i < FM10K_MBX_STATS_LEN; i++) { 326 fm10k_add_ethtool_stats(&data, &interface->hw.mbx,
310 p = (char *)&interface->hw.mbx + 327 fm10k_gstrings_mbx_stats,
311 fm10k_gstrings_mbx_stats[i].stat_offset; 328 FM10K_MBX_STATS_LEN);
312 *(data++) = (fm10k_gstrings_mbx_stats[i].sizeof_stat ==
313 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
314 }
315 329
316 if (interface->hw.mac.type != fm10k_mac_vf) { 330 if (interface->hw.mac.type != fm10k_mac_vf) {
317 for (i = 0; i < FM10K_PF_STATS_LEN; i++) { 331 fm10k_add_ethtool_stats(&data, interface,
318 p = (char *)interface + 332 fm10k_gstrings_pf_stats,
319 fm10k_gstrings_pf_stats[i].stat_offset; 333 FM10K_PF_STATS_LEN);
320 *(data++) = (fm10k_gstrings_pf_stats[i].sizeof_stat ==
321 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
322 }
323 } 334 }
324 335
325 if ((interface->flags & FM10K_FLAG_DEBUG_STATS) && iov_data) { 336 if ((interface->flags & FM10K_FLAG_DEBUG_STATS) && iov_data) {
@@ -328,18 +339,9 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev,
328 339
329 vf_info = &iov_data->vf_info[i]; 340 vf_info = &iov_data->vf_info[i];
330 341
331 /* skip stats if we don't have a vf info */ 342 fm10k_add_ethtool_stats(&data, &vf_info->mbx,
332 if (!vf_info) { 343 fm10k_gstrings_mbx_stats,
333 data += FM10K_MBX_STATS_LEN; 344 FM10K_MBX_STATS_LEN);
334 continue;
335 }
336
337 for (j = 0; j < FM10K_MBX_STATS_LEN; j++) {
338 p = (char *)&vf_info->mbx +
339 fm10k_gstrings_mbx_stats[j].stat_offset;
340 *(data++) = (fm10k_gstrings_mbx_stats[j].sizeof_stat ==
341 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
342 }
343 } 345 }
344 } 346 }
345 347
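
fm10k_add_ethtool_stats() collapses the per-table copy loops into one helper that widens each counter to u64 based on its recorded size. A self-contained model of that pattern (the structure, field names and sample values are invented for illustration):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct stat_def {
    size_t offset;
    size_t size;
};

struct counters {
    uint64_t rx_packets;
    uint32_t rx_errors;
    uint16_t vlan_id;
    uint8_t  link_up;
};

/* Copy mixed-width counters into a u64 array, switching on the recorded
 * field size the way fm10k_add_ethtool_stats() switches on sizeof_stat.
 */
static void add_stats(uint64_t **data, const void *base,
                      const struct stat_def *defs, size_t n)
{
    size_t i;

    for (i = 0; i < n; i++) {
        const char *p = (const char *)base + defs[i].offset;

        switch (defs[i].size) {
        case sizeof(uint64_t): *(*data)++ = *(const uint64_t *)p; break;
        case sizeof(uint32_t): *(*data)++ = *(const uint32_t *)p; break;
        case sizeof(uint16_t): *(*data)++ = *(const uint16_t *)p; break;
        case sizeof(uint8_t):  *(*data)++ = *(const uint8_t *)p;  break;
        default:               *(*data)++ = 0;                    break;
        }
    }
}

int main(void)
{
    struct counters c = { 12345, 7, 100, 1 };
    struct stat_def defs[] = {
        { offsetof(struct counters, rx_packets), sizeof(uint64_t) },
        { offsetof(struct counters, rx_errors),  sizeof(uint32_t) },
        { offsetof(struct counters, vlan_id),    sizeof(uint16_t) },
        { offsetof(struct counters, link_up),    sizeof(uint8_t)  },
    };
    uint64_t out[4], *p = out;
    size_t i;

    add_stats(&p, &c, defs, 4);
    for (i = 0; i < 4; i++)
        printf("%llu\n", (unsigned long long)out[i]);
    return 0;
}
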
@@ -425,7 +427,7 @@ static void fm10k_get_regs(struct net_device *netdev,
425 u32 *buff = p; 427 u32 *buff = p;
426 u16 i; 428 u16 i;
427 429
428 regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; 430 regs->version = BIT(24) | (hw->revision_id << 16) | hw->device_id;
429 431
430 switch (hw->mac.type) { 432 switch (hw->mac.type) {
431 case fm10k_mac_pf: 433 case fm10k_mac_pf:
@@ -935,15 +937,15 @@ static int fm10k_mbx_test(struct fm10k_intfc *interface, u64 *data)
935 struct fm10k_mbx_info *mbx = &hw->mbx; 937 struct fm10k_mbx_info *mbx = &hw->mbx;
936 u32 attr_flag, test_msg[6]; 938 u32 attr_flag, test_msg[6];
937 unsigned long timeout; 939 unsigned long timeout;
938 int err; 940 int err = -EINVAL;
939 941
940 /* For now this is a VF only feature */ 942 /* For now this is a VF only feature */
941 if (hw->mac.type != fm10k_mac_vf) 943 if (hw->mac.type != fm10k_mac_vf)
942 return 0; 944 return 0;
943 945
944 /* loop through both nested and unnested attribute types */ 946 /* loop through both nested and unnested attribute types */
945 for (attr_flag = (1 << FM10K_TEST_MSG_UNSET); 947 for (attr_flag = BIT(FM10K_TEST_MSG_UNSET);
946 attr_flag < (1 << (2 * FM10K_TEST_MSG_NESTED)); 948 attr_flag < BIT(2 * FM10K_TEST_MSG_NESTED);
947 attr_flag += attr_flag) { 949 attr_flag += attr_flag) {
948 /* generate message to be tested */ 950 /* generate message to be tested */
949 fm10k_tlv_msg_test_create(test_msg, attr_flag); 951 fm10k_tlv_msg_test_create(test_msg, attr_flag);
@@ -1005,7 +1007,7 @@ static u32 fm10k_get_priv_flags(struct net_device *netdev)
1005 u32 priv_flags = 0; 1007 u32 priv_flags = 0;
1006 1008
1007 if (interface->flags & FM10K_FLAG_DEBUG_STATS) 1009 if (interface->flags & FM10K_FLAG_DEBUG_STATS)
1008 priv_flags |= 1 << FM10K_PRV_FLAG_DEBUG_STATS; 1010 priv_flags |= BIT(FM10K_PRV_FLAG_DEBUG_STATS);
1009 1011
1010 return priv_flags; 1012 return priv_flags;
1011} 1013}
@@ -1014,10 +1016,10 @@ static int fm10k_set_priv_flags(struct net_device *netdev, u32 priv_flags)
1014{ 1016{
1015 struct fm10k_intfc *interface = netdev_priv(netdev); 1017 struct fm10k_intfc *interface = netdev_priv(netdev);
1016 1018
1017 if (priv_flags >= (1 << FM10K_PRV_FLAG_LEN)) 1019 if (priv_flags >= BIT(FM10K_PRV_FLAG_LEN))
1018 return -EINVAL; 1020 return -EINVAL;
1019 1021
1020 if (priv_flags & (1 << FM10K_PRV_FLAG_DEBUG_STATS)) 1022 if (priv_flags & BIT(FM10K_PRV_FLAG_DEBUG_STATS))
1021 interface->flags |= FM10K_FLAG_DEBUG_STATS; 1023 interface->flags |= FM10K_FLAG_DEBUG_STATS;
1022 else 1024 else
1023 interface->flags &= ~FM10K_FLAG_DEBUG_STATS; 1025 interface->flags &= ~FM10K_FLAG_DEBUG_STATS;
@@ -1025,11 +1027,31 @@ static int fm10k_set_priv_flags(struct net_device *netdev, u32 priv_flags)
1025 return 0; 1027 return 0;
1026} 1028}
1027 1029
1028static u32 fm10k_get_reta_size(struct net_device __always_unused *netdev) 1030u32 fm10k_get_reta_size(struct net_device __always_unused *netdev)
1029{ 1031{
1030 return FM10K_RETA_SIZE * FM10K_RETA_ENTRIES_PER_REG; 1032 return FM10K_RETA_SIZE * FM10K_RETA_ENTRIES_PER_REG;
1031} 1033}
1032 1034
1035void fm10k_write_reta(struct fm10k_intfc *interface, const u32 *indir)
1036{
1037 struct fm10k_hw *hw = &interface->hw;
1038 int i;
1039
1040 /* record entries to reta table */
1041 for (i = 0; i < FM10K_RETA_SIZE; i++, indir += 4) {
1042 u32 reta = indir[0] |
1043 (indir[1] << 8) |
1044 (indir[2] << 16) |
1045 (indir[3] << 24);
1046
1047 if (interface->reta[i] == reta)
1048 continue;
1049
1050 interface->reta[i] = reta;
1051 fm10k_write_reg(hw, FM10K_RETA(0, i), reta);
1052 }
1053}
1054
1033static int fm10k_get_reta(struct net_device *netdev, u32 *indir) 1055static int fm10k_get_reta(struct net_device *netdev, u32 *indir)
1034{ 1056{
1035 struct fm10k_intfc *interface = netdev_priv(netdev); 1057 struct fm10k_intfc *interface = netdev_priv(netdev);
@@ -1053,7 +1075,6 @@ static int fm10k_get_reta(struct net_device *netdev, u32 *indir)
1053static int fm10k_set_reta(struct net_device *netdev, const u32 *indir) 1075static int fm10k_set_reta(struct net_device *netdev, const u32 *indir)
1054{ 1076{
1055 struct fm10k_intfc *interface = netdev_priv(netdev); 1077 struct fm10k_intfc *interface = netdev_priv(netdev);
1056 struct fm10k_hw *hw = &interface->hw;
1057 int i; 1078 int i;
1058 u16 rss_i; 1079 u16 rss_i;
1059 1080
@@ -1068,19 +1089,7 @@ static int fm10k_set_reta(struct net_device *netdev, const u32 *indir)
1068 return -EINVAL; 1089 return -EINVAL;
1069 } 1090 }
1070 1091
1071 /* record entries to reta table */ 1092 fm10k_write_reta(interface, indir);
1072 for (i = 0; i < FM10K_RETA_SIZE; i++, indir += 4) {
1073 u32 reta = indir[0] |
1074 (indir[1] << 8) |
1075 (indir[2] << 16) |
1076 (indir[3] << 24);
1077
1078 if (interface->reta[i] == reta)
1079 continue;
1080
1081 interface->reta[i] = reta;
1082 fm10k_write_reg(hw, FM10K_RETA(0, i), reta);
1083 }
1084 1093
1085 return 0; 1094 return 0;
1086} 1095}
@@ -1145,7 +1154,7 @@ static unsigned int fm10k_max_channels(struct net_device *dev)
1145 1154
1146 /* For QoS report channels per traffic class */ 1155 /* For QoS report channels per traffic class */
1147 if (tcs > 1) 1156 if (tcs > 1)
1148 max_combined = 1 << (fls(max_combined / tcs) - 1); 1157 max_combined = BIT((fls(max_combined / tcs) - 1));
1149 1158
1150 return max_combined; 1159 return max_combined;
1151} 1160}
@@ -1210,11 +1219,9 @@ static int fm10k_get_ts_info(struct net_device *dev,
1210 else 1219 else
1211 info->phc_index = -1; 1220 info->phc_index = -1;
1212 1221
1213 info->tx_types = (1 << HWTSTAMP_TX_OFF) | 1222 info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
1214 (1 << HWTSTAMP_TX_ON);
1215 1223
1216 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | 1224 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
1217 (1 << HWTSTAMP_FILTER_ALL);
1218 1225
1219 return 0; 1226 return 0;
1220} 1227}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
index acfb8b1f88a7..bbf7c4bac303 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
@@ -50,7 +50,7 @@ s32 fm10k_iov_event(struct fm10k_intfc *interface)
50 s64 vflre; 50 s64 vflre;
51 int i; 51 int i;
52 52
53 /* if there is no iov_data then there is no mailboxes to process */ 53 /* if there is no iov_data then there is no mailbox to process */
54 if (!ACCESS_ONCE(interface->iov_data)) 54 if (!ACCESS_ONCE(interface->iov_data))
55 return 0; 55 return 0;
56 56
@@ -98,7 +98,7 @@ s32 fm10k_iov_mbx(struct fm10k_intfc *interface)
98 struct fm10k_iov_data *iov_data; 98 struct fm10k_iov_data *iov_data;
99 int i; 99 int i;
100 100
101 /* if there is no iov_data then there is no mailboxes to process */ 101 /* if there is no iov_data then there is no mailbox to process */
102 if (!ACCESS_ONCE(interface->iov_data)) 102 if (!ACCESS_ONCE(interface->iov_data))
103 return 0; 103 return 0;
104 104
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 4de17db3808c..0b465394f88a 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -401,10 +401,10 @@ static inline void fm10k_rx_checksum(struct fm10k_ring *ring,
401} 401}
402 402
403#define FM10K_RSS_L4_TYPES_MASK \ 403#define FM10K_RSS_L4_TYPES_MASK \
404 ((1ul << FM10K_RSSTYPE_IPV4_TCP) | \ 404 (BIT(FM10K_RSSTYPE_IPV4_TCP) | \
405 (1ul << FM10K_RSSTYPE_IPV4_UDP) | \ 405 BIT(FM10K_RSSTYPE_IPV4_UDP) | \
406 (1ul << FM10K_RSSTYPE_IPV6_TCP) | \ 406 BIT(FM10K_RSSTYPE_IPV6_TCP) | \
407 (1ul << FM10K_RSSTYPE_IPV6_UDP)) 407 BIT(FM10K_RSSTYPE_IPV6_UDP))
408 408
409static inline void fm10k_rx_hash(struct fm10k_ring *ring, 409static inline void fm10k_rx_hash(struct fm10k_ring *ring,
410 union fm10k_rx_desc *rx_desc, 410 union fm10k_rx_desc *rx_desc,
@@ -420,7 +420,7 @@ static inline void fm10k_rx_hash(struct fm10k_ring *ring,
420 return; 420 return;
421 421
422 skb_set_hash(skb, le32_to_cpu(rx_desc->d.rss), 422 skb_set_hash(skb, le32_to_cpu(rx_desc->d.rss),
423 (FM10K_RSS_L4_TYPES_MASK & (1ul << rss_type)) ? 423 (BIT(rss_type) & FM10K_RSS_L4_TYPES_MASK) ?
424 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); 424 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
425} 425}
426 426
@@ -1409,7 +1409,7 @@ static void fm10k_update_itr(struct fm10k_ring_container *ring_container)
1409 * accounts for changes in the ITR due to PCIe link speed. 1409 * accounts for changes in the ITR due to PCIe link speed.
1410 */ 1410 */
1411 itr_round = ACCESS_ONCE(ring_container->itr_scale) + 8; 1411 itr_round = ACCESS_ONCE(ring_container->itr_scale) + 8;
1412 avg_wire_size += (1 << itr_round) - 1; 1412 avg_wire_size += BIT(itr_round) - 1;
1413 avg_wire_size >>= itr_round; 1413 avg_wire_size >>= itr_round;
1414 1414
1415 /* write back value and retain adaptive flag */ 1415 /* write back value and retain adaptive flag */
@@ -1511,17 +1511,17 @@ static bool fm10k_set_qos_queues(struct fm10k_intfc *interface)
1511 /* set QoS mask and indices */ 1511 /* set QoS mask and indices */
1512 f = &interface->ring_feature[RING_F_QOS]; 1512 f = &interface->ring_feature[RING_F_QOS];
1513 f->indices = pcs; 1513 f->indices = pcs;
1514 f->mask = (1 << fls(pcs - 1)) - 1; 1514 f->mask = BIT(fls(pcs - 1)) - 1;
1515 1515
1516 /* determine the upper limit for our current DCB mode */ 1516 /* determine the upper limit for our current DCB mode */
1517 rss_i = interface->hw.mac.max_queues / pcs; 1517 rss_i = interface->hw.mac.max_queues / pcs;
1518 rss_i = 1 << (fls(rss_i) - 1); 1518 rss_i = BIT(fls(rss_i) - 1);
1519 1519
1520 /* set RSS mask and indices */ 1520 /* set RSS mask and indices */
1521 f = &interface->ring_feature[RING_F_RSS]; 1521 f = &interface->ring_feature[RING_F_RSS];
1522 rss_i = min_t(u16, rss_i, f->limit); 1522 rss_i = min_t(u16, rss_i, f->limit);
1523 f->indices = rss_i; 1523 f->indices = rss_i;
1524 f->mask = (1 << fls(rss_i - 1)) - 1; 1524 f->mask = BIT(fls(rss_i - 1)) - 1;
1525 1525
1526 /* configure pause class to queue mapping */ 1526 /* configure pause class to queue mapping */
1527 for (i = 0; i < pcs; i++) 1527 for (i = 0; i < pcs; i++)
@@ -1551,7 +1551,7 @@ static bool fm10k_set_rss_queues(struct fm10k_intfc *interface)
1551 1551
1552 /* record indices and power of 2 mask for RSS */ 1552 /* record indices and power of 2 mask for RSS */
1553 f->indices = rss_i; 1553 f->indices = rss_i;
1554 f->mask = (1 << fls(rss_i - 1)) - 1; 1554 f->mask = BIT(fls(rss_i - 1)) - 1;
1555 1555
1556 interface->num_rx_queues = rss_i; 1556 interface->num_rx_queues = rss_i;
1557 interface->num_tx_queues = rss_i; 1557 interface->num_tx_queues = rss_i;
@@ -1572,17 +1572,29 @@ static bool fm10k_set_rss_queues(struct fm10k_intfc *interface)
1572 **/ 1572 **/
1573static void fm10k_set_num_queues(struct fm10k_intfc *interface) 1573static void fm10k_set_num_queues(struct fm10k_intfc *interface)
1574{ 1574{
1575 /* Start with base case */ 1575 /* Attempt to setup QoS and RSS first */
1576 interface->num_rx_queues = 1;
1577 interface->num_tx_queues = 1;
1578
1579 if (fm10k_set_qos_queues(interface)) 1576 if (fm10k_set_qos_queues(interface))
1580 return; 1577 return;
1581 1578
1579 /* If we don't have QoS, just fallback to only RSS. */
1582 fm10k_set_rss_queues(interface); 1580 fm10k_set_rss_queues(interface);
1583} 1581}
1584 1582
1585/** 1583/**
1584 * fm10k_reset_num_queues - Reset the number of queues to zero
1585 * @interface: board private structure
1586 *
1587 * This function should be called whenever we need to reset the number of
1588 * queues after an error condition.
1589 */
1590static void fm10k_reset_num_queues(struct fm10k_intfc *interface)
1591{
1592 interface->num_tx_queues = 0;
1593 interface->num_rx_queues = 0;
1594 interface->num_q_vectors = 0;
1595}
1596
1597/**
1586 * fm10k_alloc_q_vector - Allocate memory for a single interrupt vector 1598 * fm10k_alloc_q_vector - Allocate memory for a single interrupt vector
1587 * @interface: board private structure to initialize 1599 * @interface: board private structure to initialize
1588 * @v_count: q_vectors allocated on interface, used for ring interleaving 1600 * @v_count: q_vectors allocated on interface, used for ring interleaving
@@ -1765,9 +1777,7 @@ static int fm10k_alloc_q_vectors(struct fm10k_intfc *interface)
1765 return 0; 1777 return 0;
1766 1778
1767err_out: 1779err_out:
1768 interface->num_tx_queues = 0; 1780 fm10k_reset_num_queues(interface);
1769 interface->num_rx_queues = 0;
1770 interface->num_q_vectors = 0;
1771 1781
1772 while (v_idx--) 1782 while (v_idx--)
1773 fm10k_free_q_vector(interface, v_idx); 1783 fm10k_free_q_vector(interface, v_idx);
@@ -1787,9 +1797,7 @@ static void fm10k_free_q_vectors(struct fm10k_intfc *interface)
1787{ 1797{
1788 int v_idx = interface->num_q_vectors; 1798 int v_idx = interface->num_q_vectors;
1789 1799
1790 interface->num_tx_queues = 0; 1800 fm10k_reset_num_queues(interface);
1791 interface->num_rx_queues = 0;
1792 interface->num_q_vectors = 0;
1793 1801
1794 while (v_idx--) 1802 while (v_idx--)
1795 fm10k_free_q_vector(interface, v_idx); 1803 fm10k_free_q_vector(interface, v_idx);
@@ -1935,7 +1943,8 @@ static void fm10k_assign_rings(struct fm10k_intfc *interface)
1935static void fm10k_init_reta(struct fm10k_intfc *interface) 1943static void fm10k_init_reta(struct fm10k_intfc *interface)
1936{ 1944{
1937 u16 i, rss_i = interface->ring_feature[RING_F_RSS].indices; 1945 u16 i, rss_i = interface->ring_feature[RING_F_RSS].indices;
1938 u32 reta, base; 1946 struct net_device *netdev = interface->netdev;
1947 u32 reta, *indir;
1939 1948
1940 /* If the Rx flow indirection table has been configured manually, we 1949 /* If the Rx flow indirection table has been configured manually, we
1941 * need to maintain it when possible. 1950 * need to maintain it when possible.
@@ -1960,21 +1969,16 @@ static void fm10k_init_reta(struct fm10k_intfc *interface)
1960 } 1969 }
1961 1970
1962repopulate_reta: 1971repopulate_reta:
1963 /* Populate the redirection table 4 entries at a time. To do this 1972 indir = kcalloc(fm10k_get_reta_size(netdev),
1964 * we are generating the results for n and n+2 and then interleaving 1973 sizeof(indir[0]), GFP_KERNEL);
1965 * those with the results with n+1 and n+3.
1966 */
1967 for (i = FM10K_RETA_SIZE; i--;) {
1968 /* first pass generates n and n+2 */
1969 base = ((i * 0x00040004) + 0x00020000) * rss_i;
1970 reta = (base & 0x3F803F80) >> 7;
1971 1974
1972 /* second pass generates n+1 and n+3 */ 1975 /* generate redirection table using the default kernel policy */
1973 base += 0x00010001 * rss_i; 1976 for (i = 0; i < fm10k_get_reta_size(netdev); i++)
1974 reta |= (base & 0x3F803F80) << 1; 1977 indir[i] = ethtool_rxfh_indir_default(i, rss_i);
1975 1978
1976 interface->reta[i] = reta; 1979 fm10k_write_reta(interface, indir);
1977 } 1980
1981 kfree(indir);
1978} 1982}
1979 1983
1980/** 1984/**
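The repopulate_reta hunk above replaces the hand-rolled bit packing with the stock ethtool policy: the driver now asks ethtool_rxfh_indir_default() for each entry and lets fm10k_write_reta() handle the register packing. A small stand-alone sketch of the spread that default policy produces, assuming it keeps its documented round-robin behaviour (index modulo the number of RX rings); the table size and queue count below are illustrative, not values taken from the driver.

#include <stdio.h>

/* Mirrors the round-robin policy of ethtool_rxfh_indir_default()
 * (include/linux/ethtool.h): entry i points at RX ring i % n_rx_rings. */
static unsigned int indir_default(unsigned int index, unsigned int n_rx_rings)
{
        return index % n_rx_rings;
}

int main(void)
{
        unsigned int reta_size = 32;    /* illustrative, not FM10K_RETA_SIZE */
        unsigned int rss_i = 6;         /* illustrative RSS queue count */
        unsigned int i;

        for (i = 0; i < reta_size; i++)
                printf("indir[%2u] = %u\n", i, indir_default(i, rss_i));
        return 0;
}

Keeping the init path on the same helper pair (default policy plus fm10k_write_reta()) presumably keeps it consistent with the ethtool set_rxfh path instead of duplicating the packing arithmetic here.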
@@ -1997,14 +2001,15 @@ int fm10k_init_queueing_scheme(struct fm10k_intfc *interface)
1997 if (err) { 2001 if (err) {
1998 dev_err(&interface->pdev->dev, 2002 dev_err(&interface->pdev->dev,
1999 "Unable to initialize MSI-X capability\n"); 2003 "Unable to initialize MSI-X capability\n");
2000 return err; 2004 goto err_init_msix;
2001 } 2005 }
2002 2006
2003 /* Allocate memory for queues */ 2007 /* Allocate memory for queues */
2004 err = fm10k_alloc_q_vectors(interface); 2008 err = fm10k_alloc_q_vectors(interface);
2005 if (err) { 2009 if (err) {
2006 fm10k_reset_msix_capability(interface); 2010 dev_err(&interface->pdev->dev,
2007 return err; 2011 "Unable to allocate queue vectors\n");
2012 goto err_alloc_q_vectors;
2008 } 2013 }
2009 2014
2010 /* Map rings to devices, and map devices to physical queues */ 2015 /* Map rings to devices, and map devices to physical queues */
@@ -2014,6 +2019,12 @@ int fm10k_init_queueing_scheme(struct fm10k_intfc *interface)
2014 fm10k_init_reta(interface); 2019 fm10k_init_reta(interface);
2015 2020
2016 return 0; 2021 return 0;
2022
2023err_alloc_q_vectors:
2024 fm10k_reset_msix_capability(interface);
2025err_init_msix:
2026 fm10k_reset_num_queues(interface);
2027 return err;
2017} 2028}
2018 2029
2019/** 2030/**
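The reworked fm10k_init_queueing_scheme() error handling above follows the usual kernel unwind idiom: each step that can fail jumps to a label, the labels undo earlier steps in reverse order, and the new fm10k_reset_num_queues() helper is shared by every failure path. A generic, compilable sketch of that shape with hypothetical step names (only the comments refer to the real driver functions):

#include <stdio.h>

struct ctx { int dummy; };

/* hypothetical steps standing in for the real init stages */
static int step_a(struct ctx *c) { (void)c; return 0; }
static int step_b(struct ctx *c) { (void)c; return -1; }        /* simulate failure */
static void undo_a(struct ctx *c) { (void)c; puts("undo step_a"); }
static void reset_counts(struct ctx *c) { (void)c; puts("reset counts"); }

static int init_scheme(struct ctx *c)
{
        int err;

        err = step_a(c);        /* like the MSI-X init step */
        if (err)
                goto err_a;

        err = step_b(c);        /* like fm10k_alloc_q_vectors() */
        if (err)
                goto err_b;

        return 0;

err_b:
        undo_a(c);              /* like fm10k_reset_msix_capability() */
err_a:
        reset_counts(c);        /* like fm10k_reset_num_queues(), shared by both paths */
        return err;
}

int main(void)
{
        struct ctx c = { 0 };

        return init_scheme(&c) ? 1 : 0;
}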
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index d09a8dd71fc2..1d0f0583222c 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -440,7 +440,7 @@ static void fm10k_restore_vxlan_port(struct fm10k_intfc *interface)
440 * @sa_family: Address family of new port 440 * @sa_family: Address family of new port
441 * @port: port number used for VXLAN 441 * @port: port number used for VXLAN
442 * 442 *
443 * This funciton is called when a new VXLAN interface has added a new port 443 * This function is called when a new VXLAN interface has added a new port
444 * number to the range that is currently in use for VXLAN. The new port 444 * number to the range that is currently in use for VXLAN. The new port
445 * number is always added to the tail so that the port number list should 445 * number is always added to the tail so that the port number list should
446 * match the order in which the ports were allocated. The head of the list 446 * match the order in which the ports were allocated. The head of the list
@@ -484,7 +484,7 @@ insert_tail:
484 * @sa_family: Address family of freed port 484 * @sa_family: Address family of freed port
485 * @port: port number used for VXLAN 485 * @port: port number used for VXLAN
486 * 486 *
487 * This funciton is called when a new VXLAN interface has freed a port 487 * This function is called when a new VXLAN interface has freed a port
488 * number from the range that is currently in use for VXLAN. The freed 488 * number from the range that is currently in use for VXLAN. The freed
489 * port is removed from the list and the new head is used to determine 489 * port is removed from the list and the new head is used to determine
490 * the port number for offloads. 490 * the port number for offloads.
@@ -1429,7 +1429,7 @@ struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info)
1429 1429
1430 /* configure default debug level */ 1430 /* configure default debug level */
1431 interface = netdev_priv(dev); 1431 interface = netdev_priv(dev);
1432 interface->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; 1432 interface->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
1433 1433
1434 /* configure default features */ 1434 /* configure default features */
1435 dev->features |= NETIF_F_IP_CSUM | 1435 dev->features |= NETIF_F_IP_CSUM |
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 4eb7a6fa6b0d..f0992950e228 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -99,7 +99,7 @@ void fm10k_service_event_schedule(struct fm10k_intfc *interface)
99 99
100static void fm10k_service_event_complete(struct fm10k_intfc *interface) 100static void fm10k_service_event_complete(struct fm10k_intfc *interface)
101{ 101{
102 BUG_ON(!test_bit(__FM10K_SERVICE_SCHED, &interface->state)); 102 WARN_ON(!test_bit(__FM10K_SERVICE_SCHED, &interface->state));
103 103
104 /* flush memory to make sure state is correct before next watchog */ 104 /* flush memory to make sure state is correct before next watchog */
105 smp_mb__before_atomic(); 105 smp_mb__before_atomic();
@@ -579,7 +579,7 @@ static void fm10k_configure_tx_ring(struct fm10k_intfc *interface,
579 u64 tdba = ring->dma; 579 u64 tdba = ring->dma;
580 u32 size = ring->count * sizeof(struct fm10k_tx_desc); 580 u32 size = ring->count * sizeof(struct fm10k_tx_desc);
581 u32 txint = FM10K_INT_MAP_DISABLE; 581 u32 txint = FM10K_INT_MAP_DISABLE;
582 u32 txdctl = FM10K_TXDCTL_ENABLE | (1 << FM10K_TXDCTL_MAX_TIME_SHIFT); 582 u32 txdctl = BIT(FM10K_TXDCTL_MAX_TIME_SHIFT) | FM10K_TXDCTL_ENABLE;
583 u8 reg_idx = ring->reg_idx; 583 u8 reg_idx = ring->reg_idx;
584 584
585 /* disable queue to avoid issues while updating state */ 585 /* disable queue to avoid issues while updating state */
@@ -730,7 +730,7 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface,
730 if (interface->pfc_en) 730 if (interface->pfc_en)
731 rx_pause = interface->pfc_en; 731 rx_pause = interface->pfc_en;
732#endif 732#endif
733 if (!(rx_pause & (1 << ring->qos_pc))) 733 if (!(rx_pause & BIT(ring->qos_pc)))
734 rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY; 734 rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;
735 735
736 fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl); 736 fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl);
@@ -779,7 +779,7 @@ void fm10k_update_rx_drop_en(struct fm10k_intfc *interface)
779 u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY; 779 u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
780 u8 reg_idx = ring->reg_idx; 780 u8 reg_idx = ring->reg_idx;
781 781
782 if (!(rx_pause & (1 << ring->qos_pc))) 782 if (!(rx_pause & BIT(ring->qos_pc)))
783 rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY; 783 rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;
784 784
785 fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl); 785 fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl);
@@ -903,8 +903,8 @@ static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data)
903 903
904 /* re-enable mailbox interrupt and indicate 20us delay */ 904 /* re-enable mailbox interrupt and indicate 20us delay */
905 fm10k_write_reg(hw, FM10K_VFITR(FM10K_MBX_VECTOR), 905 fm10k_write_reg(hw, FM10K_VFITR(FM10K_MBX_VECTOR),
906 FM10K_ITR_ENABLE | (FM10K_MBX_INT_DELAY >> 906 (FM10K_MBX_INT_DELAY >> hw->mac.itr_scale) |
907 hw->mac.itr_scale)); 907 FM10K_ITR_ENABLE);
908 908
909 /* service upstream mailbox */ 909 /* service upstream mailbox */
910 if (fm10k_mbx_trylock(interface)) { 910 if (fm10k_mbx_trylock(interface)) {
@@ -1065,7 +1065,7 @@ static void fm10k_reset_drop_on_empty(struct fm10k_intfc *interface, u32 eicr)
1065 if (maxholdq) 1065 if (maxholdq)
1066 fm10k_write_reg(hw, FM10K_MAXHOLDQ(7), maxholdq); 1066 fm10k_write_reg(hw, FM10K_MAXHOLDQ(7), maxholdq);
1067 for (q = 255;;) { 1067 for (q = 255;;) {
1068 if (maxholdq & (1 << 31)) { 1068 if (maxholdq & BIT(31)) {
1069 if (q < FM10K_MAX_QUEUES_PF) { 1069 if (q < FM10K_MAX_QUEUES_PF) {
1070 interface->rx_overrun_pf++; 1070 interface->rx_overrun_pf++;
1071 fm10k_write_reg(hw, FM10K_RXDCTL(q), rxdctl); 1071 fm10k_write_reg(hw, FM10K_RXDCTL(q), rxdctl);
@@ -1135,22 +1135,24 @@ static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data)
1135 1135
1136 /* re-enable mailbox interrupt and indicate 20us delay */ 1136 /* re-enable mailbox interrupt and indicate 20us delay */
1137 fm10k_write_reg(hw, FM10K_ITR(FM10K_MBX_VECTOR), 1137 fm10k_write_reg(hw, FM10K_ITR(FM10K_MBX_VECTOR),
1138 FM10K_ITR_ENABLE | (FM10K_MBX_INT_DELAY >> 1138 (FM10K_MBX_INT_DELAY >> hw->mac.itr_scale) |
1139 hw->mac.itr_scale)); 1139 FM10K_ITR_ENABLE);
1140 1140
1141 return IRQ_HANDLED; 1141 return IRQ_HANDLED;
1142} 1142}
1143 1143
1144void fm10k_mbx_free_irq(struct fm10k_intfc *interface) 1144void fm10k_mbx_free_irq(struct fm10k_intfc *interface)
1145{ 1145{
1146 struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR];
1147 struct fm10k_hw *hw = &interface->hw; 1146 struct fm10k_hw *hw = &interface->hw;
1147 struct msix_entry *entry;
1148 int itr_reg; 1148 int itr_reg;
1149 1149
1150 /* no mailbox IRQ to free if MSI-X is not enabled */ 1150 /* no mailbox IRQ to free if MSI-X is not enabled */
1151 if (!interface->msix_entries) 1151 if (!interface->msix_entries)
1152 return; 1152 return;
1153 1153
1154 entry = &interface->msix_entries[FM10K_MBX_VECTOR];
1155
1154 /* disconnect the mailbox */ 1156 /* disconnect the mailbox */
1155 hw->mbx.ops.disconnect(hw, &hw->mbx); 1157 hw->mbx.ops.disconnect(hw, &hw->mbx);
1156 1158
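The fm10k_mbx_free_irq() hunk above is a subtle correctness fix: the entry pointer used to be computed in its declaration, before the !interface->msix_entries early return, so the array was indexed off a possibly NULL base. The patched order checks the base pointer first. A stand-alone illustration of the same pattern (the struct msix_entry fields mirror the kernel definition in linux/pci.h; everything else is hypothetical):

#include <stdio.h>

struct msix_entry {
        unsigned int vector;
        unsigned short entry;
};

struct dev_ctx {
        struct msix_entry *msix_entries;        /* NULL when MSI-X is disabled */
};

static void free_special_irq(struct dev_ctx *d, unsigned int idx)
{
        struct msix_entry *entry;

        /* check the base first: &d->msix_entries[idx] on a NULL base is
         * pointer arithmetic on NULL, which C leaves undefined */
        if (!d->msix_entries)
                return;

        entry = &d->msix_entries[idx];
        printf("freeing vector %u\n", entry->vector);
}

int main(void)
{
        struct dev_ctx d = { .msix_entries = NULL };

        free_special_irq(&d, 0);        /* safely does nothing */
        return 0;
}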
@@ -1253,7 +1255,7 @@ static int fm10k_mbx_request_irq_vf(struct fm10k_intfc *interface)
1253 int err; 1255 int err;
1254 1256
1255 /* Use timer0 for interrupt moderation on the mailbox */ 1257 /* Use timer0 for interrupt moderation on the mailbox */
1256 u32 itr = FM10K_INT_MAP_TIMER0 | entry->entry; 1258 u32 itr = entry->entry | FM10K_INT_MAP_TIMER0;
1257 1259
1258 /* register mailbox handlers */ 1260 /* register mailbox handlers */
1259 err = hw->mbx.ops.register_handlers(&hw->mbx, vf_mbx_data); 1261 err = hw->mbx.ops.register_handlers(&hw->mbx, vf_mbx_data);
@@ -1377,7 +1379,7 @@ static s32 fm10k_1588_msg_pf(struct fm10k_hw *hw, u32 **results,
1377 return 0; 1379 return 0;
1378 } 1380 }
1379 1381
1380 /* if there is no iov_data then there is no mailboxes to process */ 1382 /* if there is no iov_data then there is no mailbox to process */
1381 if (!ACCESS_ONCE(interface->iov_data)) 1383 if (!ACCESS_ONCE(interface->iov_data))
1382 return FM10K_ERR_PARAM; 1384 return FM10K_ERR_PARAM;
1383 1385
@@ -1420,8 +1422,8 @@ static int fm10k_mbx_request_irq_pf(struct fm10k_intfc *interface)
1420 int err; 1422 int err;
1421 1423
1422 /* Use timer0 for interrupt moderation on the mailbox */ 1424 /* Use timer0 for interrupt moderation on the mailbox */
1423 u32 mbx_itr = FM10K_INT_MAP_TIMER0 | entry->entry; 1425 u32 mbx_itr = entry->entry | FM10K_INT_MAP_TIMER0;
1424 u32 other_itr = FM10K_INT_MAP_IMMEDIATE | entry->entry; 1426 u32 other_itr = entry->entry | FM10K_INT_MAP_IMMEDIATE;
1425 1427
1426 /* register mailbox handlers */ 1428 /* register mailbox handlers */
1427 err = hw->mbx.ops.register_handlers(&hw->mbx, pf_mbx_data); 1429 err = hw->mbx.ops.register_handlers(&hw->mbx, pf_mbx_data);
@@ -1654,6 +1656,7 @@ void fm10k_down(struct fm10k_intfc *interface)
1654{ 1656{
1655 struct net_device *netdev = interface->netdev; 1657 struct net_device *netdev = interface->netdev;
1656 struct fm10k_hw *hw = &interface->hw; 1658 struct fm10k_hw *hw = &interface->hw;
1659 int err;
1657 1660
1658 /* signal that we are down to the interrupt handler and service task */ 1661 /* signal that we are down to the interrupt handler and service task */
1659 set_bit(__FM10K_DOWN, &interface->state); 1662 set_bit(__FM10K_DOWN, &interface->state);
@@ -1678,7 +1681,9 @@ void fm10k_down(struct fm10k_intfc *interface)
1678 fm10k_update_stats(interface); 1681 fm10k_update_stats(interface);
1679 1682
1680 /* Disable DMA engine for Tx/Rx */ 1683 /* Disable DMA engine for Tx/Rx */
1681 hw->mac.ops.stop_hw(hw); 1684 err = hw->mac.ops.stop_hw(hw);
1685 if (err)
1686 dev_err(&interface->pdev->dev, "stop_hw failed: %d\n", err);
1682 1687
1683 /* free any buffers still on the rings */ 1688 /* free any buffers still on the rings */
1684 fm10k_clean_all_tx_rings(interface); 1689 fm10k_clean_all_tx_rings(interface);
@@ -1776,8 +1781,8 @@ static int fm10k_sw_init(struct fm10k_intfc *interface,
1776 netdev->addr_assign_type |= NET_ADDR_RANDOM; 1781 netdev->addr_assign_type |= NET_ADDR_RANDOM;
1777 } 1782 }
1778 1783
1779 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); 1784 ether_addr_copy(netdev->dev_addr, hw->mac.addr);
1780 memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len); 1785 ether_addr_copy(netdev->perm_addr, hw->mac.addr);
1781 1786
1782 if (!is_valid_ether_addr(netdev->perm_addr)) { 1787 if (!is_valid_ether_addr(netdev->perm_addr)) {
1783 dev_err(&pdev->dev, "Invalid MAC Address\n"); 1788 dev_err(&pdev->dev, "Invalid MAC Address\n");
@@ -1793,15 +1798,6 @@ static int fm10k_sw_init(struct fm10k_intfc *interface,
1793 /* initialize DCBNL interface */ 1798 /* initialize DCBNL interface */
1794 fm10k_dcbnl_set_ops(netdev); 1799 fm10k_dcbnl_set_ops(netdev);
1795 1800
1796 /* Initialize service timer and service task */
1797 set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
1798 setup_timer(&interface->service_timer, &fm10k_service_timer,
1799 (unsigned long)interface);
1800 INIT_WORK(&interface->service_task, fm10k_service_task);
1801
1802 /* kick off service timer now, even when interface is down */
1803 mod_timer(&interface->service_timer, (HZ * 2) + jiffies);
1804
1805 /* Intitialize timestamp data */ 1801 /* Intitialize timestamp data */
1806 fm10k_ts_init(interface); 1802 fm10k_ts_init(interface);
1807 1803
@@ -1987,6 +1983,12 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1987 if (err) 1983 if (err)
1988 goto err_sw_init; 1984 goto err_sw_init;
1989 1985
1986 /* the mbx interrupt might attempt to schedule the service task, so we
1987 * must ensure it is disabled since we haven't yet requested the timer
1988 * or work item.
1989 */
1990 set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
1991
1990 err = fm10k_mbx_request_irq(interface); 1992 err = fm10k_mbx_request_irq(interface);
1991 if (err) 1993 if (err)
1992 goto err_mbx_interrupt; 1994 goto err_mbx_interrupt;
@@ -2006,6 +2008,16 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2006 /* stop all the transmit queues from transmitting until link is up */ 2008 /* stop all the transmit queues from transmitting until link is up */
2007 netif_tx_stop_all_queues(netdev); 2009 netif_tx_stop_all_queues(netdev);
2008 2010
2011 /* Initialize service timer and service task late in order to avoid
2012 * cleanup issues.
2013 */
2014 setup_timer(&interface->service_timer, &fm10k_service_timer,
2015 (unsigned long)interface);
2016 INIT_WORK(&interface->service_task, fm10k_service_task);
2017
2018 /* kick off service timer now, even when interface is down */
2019 mod_timer(&interface->service_timer, (HZ * 2) + jiffies);
2020
2009 /* Register PTP interface */ 2021 /* Register PTP interface */
2010 fm10k_ptp_register(interface); 2022 fm10k_ptp_register(interface);
2011 2023
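The two probe-path hunks above work as a pair: __FM10K_SERVICE_DISABLE is set before fm10k_mbx_request_irq() because the mailbox interrupt may try to schedule the service task, and the timer/work objects are only created near the end of probe, once everything they depend on exists. A condensed re-ordering of the lines added above, with comments marking the intent (error handling omitted):

        /* 1. nothing may schedule the service task yet */
        set_bit(__FM10K_SERVICE_DISABLE, &interface->state);

        /* 2. the mailbox IRQ can fire from here on; per the comment added
         *    above, scheduling must stay harmless until the timer and work
         *    item have been set up */
        err = fm10k_mbx_request_irq(interface);

        /* 3. create the service timer and work item late to avoid cleanup
         *    problems on earlier error paths */
        setup_timer(&interface->service_timer, &fm10k_service_timer,
                    (unsigned long)interface);
        INIT_WORK(&interface->service_task, fm10k_service_task);

        /* 4. arm the timer; it runs even while the interface is down */
        mod_timer(&interface->service_timer, (HZ * 2) + jiffies);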
@@ -2262,11 +2274,11 @@ static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev,
2262 if (netif_running(netdev)) 2274 if (netif_running(netdev))
2263 fm10k_close(netdev); 2275 fm10k_close(netdev);
2264 2276
2277 fm10k_mbx_free_irq(interface);
2278
2265 /* free interrupts */ 2279 /* free interrupts */
2266 fm10k_clear_queueing_scheme(interface); 2280 fm10k_clear_queueing_scheme(interface);
2267 2281
2268 fm10k_mbx_free_irq(interface);
2269
2270 pci_disable_device(pdev); 2282 pci_disable_device(pdev);
2271 2283
2272 /* Request a slot reset. */ 2284 /* Request a slot reset. */
@@ -2382,7 +2394,7 @@ static struct pci_driver fm10k_driver = {
2382/** 2394/**
2383 * fm10k_register_pci_driver - register driver interface 2395 * fm10k_register_pci_driver - register driver interface
2384 * 2396 *
2385 * This funciton is called on module load in order to register the driver. 2397 * This function is called on module load in order to register the driver.
2386 **/ 2398 **/
2387int fm10k_register_pci_driver(void) 2399int fm10k_register_pci_driver(void)
2388{ 2400{
@@ -2392,7 +2404,7 @@ int fm10k_register_pci_driver(void)
2392/** 2404/**
2393 * fm10k_unregister_pci_driver - unregister driver interface 2405 * fm10k_unregister_pci_driver - unregister driver interface
2394 * 2406 *
2395 * This funciton is called on module unload in order to remove the driver. 2407 * This function is called on module unload in order to remove the driver.
2396 **/ 2408 **/
2397void fm10k_unregister_pci_driver(void) 2409void fm10k_unregister_pci_driver(void)
2398{ 2410{
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 62ccebc5f728..ecc99f9d2cce 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -527,8 +527,8 @@ static s32 fm10k_configure_dglort_map_pf(struct fm10k_hw *hw,
527 return FM10K_ERR_PARAM; 527 return FM10K_ERR_PARAM;
528 528
529 /* determine count of VSIs and queues */ 529 /* determine count of VSIs and queues */
530 queue_count = 1 << (dglort->rss_l + dglort->pc_l); 530 queue_count = BIT(dglort->rss_l + dglort->pc_l);
531 vsi_count = 1 << (dglort->vsi_l + dglort->queue_l); 531 vsi_count = BIT(dglort->vsi_l + dglort->queue_l);
532 glort = dglort->glort; 532 glort = dglort->glort;
533 q_idx = dglort->queue_b; 533 q_idx = dglort->queue_b;
534 534
@@ -544,8 +544,8 @@ static s32 fm10k_configure_dglort_map_pf(struct fm10k_hw *hw,
544 } 544 }
545 545
546 /* determine count of PCs and queues */ 546 /* determine count of PCs and queues */
547 queue_count = 1 << (dglort->queue_l + dglort->rss_l + dglort->vsi_l); 547 queue_count = BIT(dglort->queue_l + dglort->rss_l + dglort->vsi_l);
548 pc_count = 1 << dglort->pc_l; 548 pc_count = BIT(dglort->pc_l);
549 549
550 /* configure PC for Tx queues */ 550 /* configure PC for Tx queues */
551 for (pc = 0; pc < pc_count; pc++) { 551 for (pc = 0; pc < pc_count; pc++) {
@@ -711,8 +711,8 @@ static s32 fm10k_iov_assign_resources_pf(struct fm10k_hw *hw, u16 num_vfs,
711 FM10K_RXDCTL_WRITE_BACK_MIN_DELAY | 711 FM10K_RXDCTL_WRITE_BACK_MIN_DELAY |
712 FM10K_RXDCTL_DROP_ON_EMPTY); 712 FM10K_RXDCTL_DROP_ON_EMPTY);
713 fm10k_write_reg(hw, FM10K_RXQCTL(vf_q_idx), 713 fm10k_write_reg(hw, FM10K_RXQCTL(vf_q_idx),
714 FM10K_RXQCTL_VF | 714 (i << FM10K_RXQCTL_VF_SHIFT) |
715 (i << FM10K_RXQCTL_VF_SHIFT)); 715 FM10K_RXQCTL_VF);
716 716
717 /* map queue pair to VF */ 717 /* map queue pair to VF */
718 fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), vf_q_idx); 718 fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), vf_q_idx);
@@ -952,7 +952,7 @@ static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw,
952 return FM10K_ERR_PARAM; 952 return FM10K_ERR_PARAM;
953 953
954 /* clear event notification of VF FLR */ 954 /* clear event notification of VF FLR */
955 fm10k_write_reg(hw, FM10K_PFVFLREC(vf_idx / 32), 1 << (vf_idx % 32)); 955 fm10k_write_reg(hw, FM10K_PFVFLREC(vf_idx / 32), BIT(vf_idx % 32));
956 956
957 /* force timeout and then disconnect the mailbox */ 957 /* force timeout and then disconnect the mailbox */
958 vf_info->mbx.timeout = 0; 958 vf_info->mbx.timeout = 0;
@@ -987,7 +987,7 @@ static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw,
987 txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) | 987 txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) |
988 (vf_idx << FM10K_TXQCTL_TC_SHIFT) | 988 (vf_idx << FM10K_TXQCTL_TC_SHIFT) |
989 FM10K_TXQCTL_VF | vf_idx; 989 FM10K_TXQCTL_VF | vf_idx;
990 rxqctl = FM10K_RXQCTL_VF | (vf_idx << FM10K_RXQCTL_VF_SHIFT); 990 rxqctl = (vf_idx << FM10K_RXQCTL_VF_SHIFT) | FM10K_RXQCTL_VF;
991 991
992 /* stop further DMA and reset queue ownership back to VF */ 992 /* stop further DMA and reset queue ownership back to VF */
993 for (i = vf_q_idx; i < (queues_per_pool + vf_q_idx); i++) { 993 for (i = vf_q_idx; i < (queues_per_pool + vf_q_idx); i++) {
@@ -1370,7 +1370,7 @@ s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results,
1370 mode = fm10k_iov_supported_xcast_mode_pf(vf_info, mode); 1370 mode = fm10k_iov_supported_xcast_mode_pf(vf_info, mode);
1371 1371
1372 /* if mode is not currently enabled, enable it */ 1372 /* if mode is not currently enabled, enable it */
1373 if (!(FM10K_VF_FLAG_ENABLED(vf_info) & (1 << mode))) 1373 if (!(FM10K_VF_FLAG_ENABLED(vf_info) & BIT(mode)))
1374 fm10k_update_xcast_mode_pf(hw, vf_info->glort, mode); 1374 fm10k_update_xcast_mode_pf(hw, vf_info->glort, mode);
1375 1375
1376 /* swap mode back to a bit flag */ 1376 /* swap mode back to a bit flag */
@@ -1604,7 +1604,7 @@ static s32 fm10k_request_lport_map_pf(struct fm10k_hw *hw)
1604 * @hw: pointer to hardware structure 1604 * @hw: pointer to hardware structure
1605 * @switch_ready: pointer to boolean value that will record switch state 1605 * @switch_ready: pointer to boolean value that will record switch state
1606 * 1606 *
1607 * This funciton will check the DMA_CTRL2 register and mailbox in order 1607 * This function will check the DMA_CTRL2 register and mailbox in order
1608 * to determine if the switch is ready for the PF to begin requesting 1608 * to determine if the switch is ready for the PF to begin requesting
1609 * addresses and mapping traffic to the local interface. 1609 * addresses and mapping traffic to the local interface.
1610 **/ 1610 **/
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c b/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
index b4945e8abe03..1c1ccade6538 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
@@ -416,7 +416,7 @@ void fm10k_ptp_register(struct fm10k_intfc *interface)
416 /* This math is simply the inverse of the math in 416 /* This math is simply the inverse of the math in
417 * fm10k_adjust_systime_pf applied to an adjustment value 417 * fm10k_adjust_systime_pf applied to an adjustment value
418 * of 2^30 - 1 which is the maximum value of the register: 418 * of 2^30 - 1 which is the maximum value of the register:
419 * max_ppb == ((2^30 - 1) * 5^9) / 2^31 419 * max_ppb == ((2^30 - 1) * 5^9) / 2^31
420 */ 420 */
421 ptp_caps->max_adj = 976562; 421 ptp_caps->max_adj = 976562;
422 ptp_caps->adjfreq = fm10k_ptp_adjfreq; 422 ptp_caps->adjfreq = fm10k_ptp_adjfreq;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
index ab01bb30752f..b999897e50d8 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
@@ -222,7 +222,7 @@ s32 fm10k_tlv_attr_put_value(u32 *msg, u16 attr_id, s64 value, u32 len)
222 attr = &msg[FM10K_TLV_DWORD_LEN(*msg)]; 222 attr = &msg[FM10K_TLV_DWORD_LEN(*msg)];
223 223
224 if (len < 4) { 224 if (len < 4) {
225 attr[1] = (u32)value & ((0x1ul << (8 * len)) - 1); 225 attr[1] = (u32)value & (BIT(8 * len) - 1);
226 } else { 226 } else {
227 attr[1] = (u32)value; 227 attr[1] = (u32)value;
228 if (len > 4) 228 if (len > 4)
@@ -652,29 +652,29 @@ const struct fm10k_tlv_attr fm10k_tlv_msg_test_attr[] = {
652 **/ 652 **/
653static void fm10k_tlv_msg_test_generate_data(u32 *msg, u32 attr_flags) 653static void fm10k_tlv_msg_test_generate_data(u32 *msg, u32 attr_flags)
654{ 654{
655 if (attr_flags & (1 << FM10K_TEST_MSG_STRING)) 655 if (attr_flags & BIT(FM10K_TEST_MSG_STRING))
656 fm10k_tlv_attr_put_null_string(msg, FM10K_TEST_MSG_STRING, 656 fm10k_tlv_attr_put_null_string(msg, FM10K_TEST_MSG_STRING,
657 test_str); 657 test_str);
658 if (attr_flags & (1 << FM10K_TEST_MSG_MAC_ADDR)) 658 if (attr_flags & BIT(FM10K_TEST_MSG_MAC_ADDR))
659 fm10k_tlv_attr_put_mac_vlan(msg, FM10K_TEST_MSG_MAC_ADDR, 659 fm10k_tlv_attr_put_mac_vlan(msg, FM10K_TEST_MSG_MAC_ADDR,
660 test_mac, test_vlan); 660 test_mac, test_vlan);
661 if (attr_flags & (1 << FM10K_TEST_MSG_U8)) 661 if (attr_flags & BIT(FM10K_TEST_MSG_U8))
662 fm10k_tlv_attr_put_u8(msg, FM10K_TEST_MSG_U8, test_u8); 662 fm10k_tlv_attr_put_u8(msg, FM10K_TEST_MSG_U8, test_u8);
663 if (attr_flags & (1 << FM10K_TEST_MSG_U16)) 663 if (attr_flags & BIT(FM10K_TEST_MSG_U16))
664 fm10k_tlv_attr_put_u16(msg, FM10K_TEST_MSG_U16, test_u16); 664 fm10k_tlv_attr_put_u16(msg, FM10K_TEST_MSG_U16, test_u16);
665 if (attr_flags & (1 << FM10K_TEST_MSG_U32)) 665 if (attr_flags & BIT(FM10K_TEST_MSG_U32))
666 fm10k_tlv_attr_put_u32(msg, FM10K_TEST_MSG_U32, test_u32); 666 fm10k_tlv_attr_put_u32(msg, FM10K_TEST_MSG_U32, test_u32);
667 if (attr_flags & (1 << FM10K_TEST_MSG_U64)) 667 if (attr_flags & BIT(FM10K_TEST_MSG_U64))
668 fm10k_tlv_attr_put_u64(msg, FM10K_TEST_MSG_U64, test_u64); 668 fm10k_tlv_attr_put_u64(msg, FM10K_TEST_MSG_U64, test_u64);
669 if (attr_flags & (1 << FM10K_TEST_MSG_S8)) 669 if (attr_flags & BIT(FM10K_TEST_MSG_S8))
670 fm10k_tlv_attr_put_s8(msg, FM10K_TEST_MSG_S8, test_s8); 670 fm10k_tlv_attr_put_s8(msg, FM10K_TEST_MSG_S8, test_s8);
671 if (attr_flags & (1 << FM10K_TEST_MSG_S16)) 671 if (attr_flags & BIT(FM10K_TEST_MSG_S16))
672 fm10k_tlv_attr_put_s16(msg, FM10K_TEST_MSG_S16, test_s16); 672 fm10k_tlv_attr_put_s16(msg, FM10K_TEST_MSG_S16, test_s16);
673 if (attr_flags & (1 << FM10K_TEST_MSG_S32)) 673 if (attr_flags & BIT(FM10K_TEST_MSG_S32))
674 fm10k_tlv_attr_put_s32(msg, FM10K_TEST_MSG_S32, test_s32); 674 fm10k_tlv_attr_put_s32(msg, FM10K_TEST_MSG_S32, test_s32);
675 if (attr_flags & (1 << FM10K_TEST_MSG_S64)) 675 if (attr_flags & BIT(FM10K_TEST_MSG_S64))
676 fm10k_tlv_attr_put_s64(msg, FM10K_TEST_MSG_S64, test_s64); 676 fm10k_tlv_attr_put_s64(msg, FM10K_TEST_MSG_S64, test_s64);
677 if (attr_flags & (1 << FM10K_TEST_MSG_LE_STRUCT)) 677 if (attr_flags & BIT(FM10K_TEST_MSG_LE_STRUCT))
678 fm10k_tlv_attr_put_le_struct(msg, FM10K_TEST_MSG_LE_STRUCT, 678 fm10k_tlv_attr_put_le_struct(msg, FM10K_TEST_MSG_LE_STRUCT,
679 test_le, 8); 679 test_le, 8);
680} 680}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
index 854ebb1906bf..5c0533054c5f 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
@@ -617,10 +617,10 @@ struct fm10k_vf_info {
617 */ 617 */
618}; 618};
619 619
620#define FM10K_VF_FLAG_ALLMULTI_CAPABLE ((u8)1 << FM10K_XCAST_MODE_ALLMULTI) 620#define FM10K_VF_FLAG_ALLMULTI_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_ALLMULTI))
621#define FM10K_VF_FLAG_MULTI_CAPABLE ((u8)1 << FM10K_XCAST_MODE_MULTI) 621#define FM10K_VF_FLAG_MULTI_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_MULTI))
622#define FM10K_VF_FLAG_PROMISC_CAPABLE ((u8)1 << FM10K_XCAST_MODE_PROMISC) 622#define FM10K_VF_FLAG_PROMISC_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_PROMISC))
623#define FM10K_VF_FLAG_NONE_CAPABLE ((u8)1 << FM10K_XCAST_MODE_NONE) 623#define FM10K_VF_FLAG_NONE_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_NONE))
624#define FM10K_VF_FLAG_CAPABLE(vf_info) ((vf_info)->vf_flags & (u8)0xF) 624#define FM10K_VF_FLAG_CAPABLE(vf_info) ((vf_info)->vf_flags & (u8)0xF)
625#define FM10K_VF_FLAG_ENABLED(vf_info) ((vf_info)->vf_flags >> 4) 625#define FM10K_VF_FLAG_ENABLED(vf_info) ((vf_info)->vf_flags >> 4)
626#define FM10K_VF_FLAG_SET_MODE(mode) ((u8)0x10 << (mode)) 626#define FM10K_VF_FLAG_SET_MODE(mode) ((u8)0x10 << (mode))
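The (1 << x) to BIT(x) conversions across these fm10k hunks are mechanical, but the fm10k_type.h change shows why the cast placement matters: BIT() shifts an unsigned long, and the result is only narrowed back to u8 outside the macro. A small stand-alone demo with BIT() defined locally to mirror its kernel definition; the xcast mode value used here is a placeholder, not the driver's:

#include <stdio.h>

#define BIT(nr) (1UL << (nr))          /* mirrors include/linux/bitops.h */

#define XCAST_MODE_EXAMPLE     2       /* placeholder mode number */
#define FLAG_EXAMPLE_CAPABLE   ((unsigned char)(BIT(XCAST_MODE_EXAMPLE)))

int main(void)
{
        /* BIT(31) is well defined because the shift happens in unsigned
         * long; a plain (1 << 31) shifts into the sign bit of a 32-bit int */
        unsigned long high = BIT(31);

        printf("BIT(31)       = 0x%lx\n", high);
        printf("capable flag  = 0x%x\n", FLAG_EXAMPLE_CAPABLE);
        return 0;
}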
diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile
index b4729ba57c9c..3b3c63e54ed6 100644
--- a/drivers/net/ethernet/intel/i40e/Makefile
+++ b/drivers/net/ethernet/intel/i40e/Makefile
@@ -41,6 +41,7 @@ i40e-objs := i40e_main.o \
41 i40e_diag.o \ 41 i40e_diag.o \
42 i40e_txrx.o \ 42 i40e_txrx.o \
43 i40e_ptp.o \ 43 i40e_ptp.o \
44 i40e_client.o \
44 i40e_virtchnl_pf.o 45 i40e_virtchnl_pf.o
45 46
46i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o 47i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 2f6210ae8ba0..d25b3be5ba89 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -58,6 +58,7 @@
58#ifdef I40E_FCOE 58#ifdef I40E_FCOE
59#include "i40e_fcoe.h" 59#include "i40e_fcoe.h"
60#endif 60#endif
61#include "i40e_client.h"
61#include "i40e_virtchnl.h" 62#include "i40e_virtchnl.h"
62#include "i40e_virtchnl_pf.h" 63#include "i40e_virtchnl_pf.h"
63#include "i40e_txrx.h" 64#include "i40e_txrx.h"
@@ -190,6 +191,7 @@ struct i40e_lump_tracking {
190 u16 search_hint; 191 u16 search_hint;
191 u16 list[0]; 192 u16 list[0];
192#define I40E_PILE_VALID_BIT 0x8000 193#define I40E_PILE_VALID_BIT 0x8000
194#define I40E_IWARP_IRQ_PILE_ID (I40E_PILE_VALID_BIT - 2)
193}; 195};
194 196
195#define I40E_DEFAULT_ATR_SAMPLE_RATE 20 197#define I40E_DEFAULT_ATR_SAMPLE_RATE 20
@@ -242,7 +244,6 @@ struct i40e_fdir_filter {
242#define I40E_DCB_PRIO_TYPE_STRICT 0 244#define I40E_DCB_PRIO_TYPE_STRICT 0
243#define I40E_DCB_PRIO_TYPE_ETS 1 245#define I40E_DCB_PRIO_TYPE_ETS 1
244#define I40E_DCB_STRICT_PRIO_CREDITS 127 246#define I40E_DCB_STRICT_PRIO_CREDITS 127
245#define I40E_MAX_USER_PRIORITY 8
246/* DCB per TC information data structure */ 247/* DCB per TC information data structure */
247struct i40e_tc_info { 248struct i40e_tc_info {
248 u16 qoffset; /* Queue offset from base queue */ 249 u16 qoffset; /* Queue offset from base queue */
@@ -282,6 +283,8 @@ struct i40e_pf {
282#endif /* I40E_FCOE */ 283#endif /* I40E_FCOE */
283 u16 num_lan_qps; /* num lan queues this PF has set up */ 284 u16 num_lan_qps; /* num lan queues this PF has set up */
284 u16 num_lan_msix; /* num queue vectors for the base PF vsi */ 285 u16 num_lan_msix; /* num queue vectors for the base PF vsi */
286 u16 num_iwarp_msix; /* num of iwarp vectors for this PF */
287 int iwarp_base_vector;
285 int queues_left; /* queues left unclaimed */ 288 int queues_left; /* queues left unclaimed */
286 u16 alloc_rss_size; /* allocated RSS queues */ 289 u16 alloc_rss_size; /* allocated RSS queues */
287 u16 rss_size_max; /* HW defined max RSS queues */ 290 u16 rss_size_max; /* HW defined max RSS queues */
@@ -329,6 +332,7 @@ struct i40e_pf {
329#define I40E_FLAG_16BYTE_RX_DESC_ENABLED BIT_ULL(13) 332#define I40E_FLAG_16BYTE_RX_DESC_ENABLED BIT_ULL(13)
330#define I40E_FLAG_CLEAN_ADMINQ BIT_ULL(14) 333#define I40E_FLAG_CLEAN_ADMINQ BIT_ULL(14)
331#define I40E_FLAG_FILTER_SYNC BIT_ULL(15) 334#define I40E_FLAG_FILTER_SYNC BIT_ULL(15)
335#define I40E_FLAG_SERVICE_CLIENT_REQUESTED BIT_ULL(16)
332#define I40E_FLAG_PROCESS_MDD_EVENT BIT_ULL(17) 336#define I40E_FLAG_PROCESS_MDD_EVENT BIT_ULL(17)
333#define I40E_FLAG_PROCESS_VFLR_EVENT BIT_ULL(18) 337#define I40E_FLAG_PROCESS_VFLR_EVENT BIT_ULL(18)
334#define I40E_FLAG_SRIOV_ENABLED BIT_ULL(19) 338#define I40E_FLAG_SRIOV_ENABLED BIT_ULL(19)
@@ -571,6 +575,8 @@ struct i40e_vsi {
571 struct kobject *kobj; /* sysfs object */ 575 struct kobject *kobj; /* sysfs object */
572 bool current_isup; /* Sync 'link up' logging */ 576 bool current_isup; /* Sync 'link up' logging */
573 577
578 void *priv; /* client driver data reference. */
579
574 /* VSI specific handlers */ 580 /* VSI specific handlers */
575 irqreturn_t (*irq_handler)(int irq, void *data); 581 irqreturn_t (*irq_handler)(int irq, void *data);
576 582
@@ -728,6 +734,10 @@ void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
728 struct i40e_vsi_context *ctxt, 734 struct i40e_vsi_context *ctxt,
729 u8 enabled_tc, bool is_add); 735 u8 enabled_tc, bool is_add);
730#endif 736#endif
737void i40e_service_event_schedule(struct i40e_pf *pf);
738void i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id,
739 u8 *msg, u16 len);
740
731int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool enable); 741int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool enable);
732int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count); 742int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count);
733struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid, 743struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid,
@@ -750,6 +760,17 @@ static inline void i40e_dbg_pf_exit(struct i40e_pf *pf) {}
750static inline void i40e_dbg_init(void) {} 760static inline void i40e_dbg_init(void) {}
751static inline void i40e_dbg_exit(void) {} 761static inline void i40e_dbg_exit(void) {}
752#endif /* CONFIG_DEBUG_FS*/ 762#endif /* CONFIG_DEBUG_FS*/
763/* needed by client drivers */
764int i40e_lan_add_device(struct i40e_pf *pf);
765int i40e_lan_del_device(struct i40e_pf *pf);
766void i40e_client_subtask(struct i40e_pf *pf);
767void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi);
768void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi);
769void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset);
770void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs);
771void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id);
772int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id,
773 enum i40e_client_type type);
753/** 774/**
754 * i40e_irq_dynamic_enable - Enable default interrupt generation settings 775 * i40e_irq_dynamic_enable - Enable default interrupt generation settings
755 * @vsi: pointer to a vsi 776 * @vsi: pointer to a vsi
@@ -789,6 +810,7 @@ int i40e_vlan_rx_kill_vid(struct net_device *netdev,
789 __always_unused __be16 proto, u16 vid); 810 __always_unused __be16 proto, u16 vid);
790#endif 811#endif
791int i40e_open(struct net_device *netdev); 812int i40e_open(struct net_device *netdev);
813int i40e_close(struct net_device *netdev);
792int i40e_vsi_open(struct i40e_vsi *vsi); 814int i40e_vsi_open(struct i40e_vsi *vsi);
793void i40e_vlan_stripping_disable(struct i40e_vsi *vsi); 815void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
794int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid); 816int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
@@ -801,7 +823,6 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
801struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr, 823struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
802 bool is_vf, bool is_netdev); 824 bool is_vf, bool is_netdev);
803#ifdef I40E_FCOE 825#ifdef I40E_FCOE
804int i40e_close(struct net_device *netdev);
805int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto, 826int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
806 struct tc_to_netdev *tc); 827 struct tc_to_netdev *tc);
807void i40e_netpoll(struct net_device *netdev); 828void i40e_netpoll(struct net_device *netdev);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index df8e2fd6a649..43bb4139d896 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -33,16 +33,6 @@
33static void i40e_resume_aq(struct i40e_hw *hw); 33static void i40e_resume_aq(struct i40e_hw *hw);
34 34
35/** 35/**
36 * i40e_is_nvm_update_op - return true if this is an NVM update operation
37 * @desc: API request descriptor
38 **/
39static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
40{
41 return (desc->opcode == cpu_to_le16(i40e_aqc_opc_nvm_erase)) ||
42 (desc->opcode == cpu_to_le16(i40e_aqc_opc_nvm_update));
43}
44
45/**
46 * i40e_adminq_init_regs - Initialize AdminQ registers 36 * i40e_adminq_init_regs - Initialize AdminQ registers
47 * @hw: pointer to the hardware structure 37 * @hw: pointer to the hardware structure
48 * 38 *
@@ -624,7 +614,7 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
624 614
625 /* pre-emptive resource lock release */ 615 /* pre-emptive resource lock release */
626 i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL); 616 i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
627 hw->aq.nvm_release_on_done = false; 617 hw->nvm_release_on_done = false;
628 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; 618 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
629 619
630 ret_code = i40e_aq_set_hmc_resource_profile(hw, 620 ret_code = i40e_aq_set_hmc_resource_profile(hw,
@@ -1023,26 +1013,7 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
1023 hw->aq.arq.next_to_clean = ntc; 1013 hw->aq.arq.next_to_clean = ntc;
1024 hw->aq.arq.next_to_use = ntu; 1014 hw->aq.arq.next_to_use = ntu;
1025 1015
1026 if (i40e_is_nvm_update_op(&e->desc)) { 1016 i40e_nvmupd_check_wait_event(hw, le16_to_cpu(e->desc.opcode));
1027 if (hw->aq.nvm_release_on_done) {
1028 i40e_release_nvm(hw);
1029 hw->aq.nvm_release_on_done = false;
1030 }
1031
1032 switch (hw->nvmupd_state) {
1033 case I40E_NVMUPD_STATE_INIT_WAIT:
1034 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1035 break;
1036
1037 case I40E_NVMUPD_STATE_WRITE_WAIT:
1038 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
1039 break;
1040
1041 default:
1042 break;
1043 }
1044 }
1045
1046clean_arq_element_out: 1017clean_arq_element_out:
1047 /* Set pending if needed, unlock and return */ 1018 /* Set pending if needed, unlock and return */
1048 if (pending) 1019 if (pending)
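The i40e_clean_arq_element() hunk above deletes the inline NVM-update bookkeeping and replaces it with one call to i40e_nvmupd_check_wait_event(). The removed lines spell out what that helper is expected to cover; the sketch below is reconstructed purely from those removed lines (plus the nvm_release_on_done move from hw->aq to hw seen elsewhere in this patch) and is not the helper's actual body:

/* Sketch only: behaviour inferred from the code removed above. */
static void nvmupd_check_wait_event_sketch(struct i40e_hw *hw, u16 opcode)
{
        if (opcode != i40e_aqc_opc_nvm_erase &&
            opcode != i40e_aqc_opc_nvm_update)
                return;

        if (hw->nvm_release_on_done) {
                i40e_release_nvm(hw);
                hw->nvm_release_on_done = false;
        }

        switch (hw->nvmupd_state) {
        case I40E_NVMUPD_STATE_INIT_WAIT:
                hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
                break;
        case I40E_NVMUPD_STATE_WRITE_WAIT:
                hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
                break;
        default:
                break;
        }
}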
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index 12fbbddea299..d92aad38afdc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -97,7 +97,6 @@ struct i40e_adminq_info {
97 u32 fw_build; /* firmware build number */ 97 u32 fw_build; /* firmware build number */
98 u16 api_maj_ver; /* api major version */ 98 u16 api_maj_ver; /* api major version */
99 u16 api_min_ver; /* api minor version */ 99 u16 api_min_ver; /* api minor version */
100 bool nvm_release_on_done;
101 100
102 struct mutex asq_mutex; /* Send queue lock */ 101 struct mutex asq_mutex; /* Send queue lock */
103 struct mutex arq_mutex; /* Receive queue lock */ 102 struct mutex arq_mutex; /* Receive queue lock */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
new file mode 100644
index 000000000000..0e6ac841321c
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -0,0 +1,1012 @@
1/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 - 2015 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
18 * The full GNU General Public License is included in this distribution in
19 * the file called "COPYING".
20 *
21 * Contact Information:
22 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 ******************************************************************************/
26
27#include <linux/list.h>
28#include <linux/errno.h>
29
30#include "i40e.h"
31#include "i40e_prototype.h"
32#include "i40e_client.h"
33
34static const char i40e_client_interface_version_str[] = I40E_CLIENT_VERSION_STR;
35
36static LIST_HEAD(i40e_devices);
37static DEFINE_MUTEX(i40e_device_mutex);
38
39static LIST_HEAD(i40e_clients);
40static DEFINE_MUTEX(i40e_client_mutex);
41
42static LIST_HEAD(i40e_client_instances);
43static DEFINE_MUTEX(i40e_client_instance_mutex);
44
45static int i40e_client_virtchnl_send(struct i40e_info *ldev,
46 struct i40e_client *client,
47 u32 vf_id, u8 *msg, u16 len);
48
49static int i40e_client_setup_qvlist(struct i40e_info *ldev,
50 struct i40e_client *client,
51 struct i40e_qvlist_info *qvlist_info);
52
53static void i40e_client_request_reset(struct i40e_info *ldev,
54 struct i40e_client *client,
55 u32 reset_level);
56
57static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
58 struct i40e_client *client,
59 bool is_vf, u32 vf_id,
60 u32 flag, u32 valid_flag);
61
62static struct i40e_ops i40e_lan_ops = {
63 .virtchnl_send = i40e_client_virtchnl_send,
64 .setup_qvlist = i40e_client_setup_qvlist,
65 .request_reset = i40e_client_request_reset,
66 .update_vsi_ctxt = i40e_client_update_vsi_ctxt,
67};
68
69/**
70 * i40e_client_type_to_vsi_type - convert client type to vsi type
71 * @client_type: the i40e_client type
72 *
73 * returns the related vsi type value
74 **/
75static
76enum i40e_vsi_type i40e_client_type_to_vsi_type(enum i40e_client_type type)
77{
78 switch (type) {
79 case I40E_CLIENT_IWARP:
80 return I40E_VSI_IWARP;
81
82 case I40E_CLIENT_VMDQ2:
83 return I40E_VSI_VMDQ2;
84
85 default:
86 pr_err("i40e: Client type unknown\n");
87 return I40E_VSI_TYPE_UNKNOWN;
88 }
89}
90
91/**
92 * i40e_client_get_params - Get the params that can change at runtime
93 * @vsi: the VSI with the message
 94 * @params: client param struct
95 *
96 **/
97static
98int i40e_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params)
99{
100 struct i40e_dcbx_config *dcb_cfg = &vsi->back->hw.local_dcbx_config;
101 int i = 0;
102
103 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
104 u8 tc = dcb_cfg->etscfg.prioritytable[i];
105 u16 qs_handle;
106
107 /* If TC is not enabled for VSI use TC0 for UP */
108 if (!(vsi->tc_config.enabled_tc & BIT(tc)))
109 tc = 0;
110
111 qs_handle = le16_to_cpu(vsi->info.qs_handle[tc]);
112 params->qos.prio_qos[i].tc = tc;
113 params->qos.prio_qos[i].qs_handle = qs_handle;
114 if (qs_handle == I40E_AQ_VSI_QS_HANDLE_INVALID) {
115 dev_err(&vsi->back->pdev->dev, "Invalid queue set handle for TC = %d, vsi id = %d\n",
116 tc, vsi->id);
117 return -EINVAL;
118 }
119 }
120
121 params->mtu = vsi->netdev->mtu;
122 return 0;
123}
124
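i40e_client_get_params() above walks the eight user priorities, maps each to a traffic class through the DCB priority table, and falls back to TC0 whenever the VSI does not have that TC enabled. A tiny stand-alone illustration of just that mapping rule; the table contents and variable names are made up for the demo:

#include <stdio.h>

#define MAX_USER_PRIORITY 8

int main(void)
{
        /* illustrative DCB config: UPs 0-3 map to TC0, UPs 4-7 to TC1 */
        unsigned char prioritytable[MAX_USER_PRIORITY] = { 0, 0, 0, 0, 1, 1, 1, 1 };
        unsigned char enabled_tc = 0x1;         /* only TC0 enabled on this VSI */
        unsigned int up;

        for (up = 0; up < MAX_USER_PRIORITY; up++) {
                unsigned char tc = prioritytable[up];

                /* same fallback as the driver: TC not enabled, use TC0 */
                if (!(enabled_tc & (1u << tc)))
                        tc = 0;
                printf("UP %u -> TC %u\n", up, tc);
        }
        return 0;
}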
125/**
126 * i40e_notify_client_of_vf_msg - call the client vf message callback
127 * @vsi: the VSI with the message
128 * @vf_id: the absolute VF id that sent the message
129 * @msg: message buffer
130 * @len: length of the message
131 *
132 * If there is a client to this VSI, call the client
133 **/
134void
135i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id, u8 *msg, u16 len)
136{
137 struct i40e_client_instance *cdev;
138
139 if (!vsi)
140 return;
141 mutex_lock(&i40e_client_instance_mutex);
142 list_for_each_entry(cdev, &i40e_client_instances, list) {
143 if (cdev->lan_info.pf == vsi->back) {
144 if (!cdev->client ||
145 !cdev->client->ops ||
146 !cdev->client->ops->virtchnl_receive) {
147 dev_dbg(&vsi->back->pdev->dev,
148 "Cannot locate client instance virtual channel receive routine\n");
149 continue;
150 }
151 cdev->client->ops->virtchnl_receive(&cdev->lan_info,
152 cdev->client,
153 vf_id, msg, len);
154 }
155 }
156 mutex_unlock(&i40e_client_instance_mutex);
157}
158
159/**
160 * i40e_notify_client_of_l2_param_changes - call the client notify callback
161 * @vsi: the VSI with l2 param changes
162 *
163 * If there is a client to this VSI, call the client
164 **/
165void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi)
166{
167 struct i40e_client_instance *cdev;
168 struct i40e_params params;
169
170 if (!vsi)
171 return;
172 memset(&params, 0, sizeof(params));
173 i40e_client_get_params(vsi, &params);
174 mutex_lock(&i40e_client_instance_mutex);
175 list_for_each_entry(cdev, &i40e_client_instances, list) {
176 if (cdev->lan_info.pf == vsi->back) {
177 if (!cdev->client ||
178 !cdev->client->ops ||
179 !cdev->client->ops->l2_param_change) {
180 dev_dbg(&vsi->back->pdev->dev,
181 "Cannot locate client instance l2_param_change routine\n");
182 continue;
183 }
184 cdev->lan_info.params = params;
185 cdev->client->ops->l2_param_change(&cdev->lan_info,
186 cdev->client,
187 &params);
188 }
189 }
190 mutex_unlock(&i40e_client_instance_mutex);
191}
192
193/**
194 * i40e_notify_client_of_netdev_open - call the client open callback
195 * @vsi: the VSI with netdev opened
196 *
197 * If there is a client to this netdev, call the client with open
198 **/
199void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi)
200{
201 struct i40e_client_instance *cdev;
202
203 if (!vsi)
204 return;
205 mutex_lock(&i40e_client_instance_mutex);
206 list_for_each_entry(cdev, &i40e_client_instances, list) {
207 if (cdev->lan_info.netdev == vsi->netdev) {
208 if (!cdev->client ||
209 !cdev->client->ops || !cdev->client->ops->open) {
210 dev_dbg(&vsi->back->pdev->dev,
211 "Cannot locate client instance open routine\n");
212 continue;
213 }
214 cdev->client->ops->open(&cdev->lan_info, cdev->client);
215 }
216 }
217 mutex_unlock(&i40e_client_instance_mutex);
218}
219
220/**
221 * i40e_client_release_qvlist
222 * @ldev: pointer to L2 context.
223 *
224 **/
225static void i40e_client_release_qvlist(struct i40e_info *ldev)
226{
227 struct i40e_qvlist_info *qvlist_info = ldev->qvlist_info;
228 u32 i;
229
230 if (!ldev->qvlist_info)
231 return;
232
233 for (i = 0; i < qvlist_info->num_vectors; i++) {
234 struct i40e_pf *pf = ldev->pf;
235 struct i40e_qv_info *qv_info;
236 u32 reg_idx;
237
238 qv_info = &qvlist_info->qv_info[i];
239 if (!qv_info)
240 continue;
241 reg_idx = I40E_PFINT_LNKLSTN(qv_info->v_idx - 1);
242 wr32(&pf->hw, reg_idx, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
243 }
244 kfree(ldev->qvlist_info);
245 ldev->qvlist_info = NULL;
246}
247
248/**
249 * i40e_notify_client_of_netdev_close - call the client close callback
250 * @vsi: the VSI with netdev closed
251 * @reset: true when close called due to a reset pending
252 *
253 * If there is a client to this netdev, call the client with close
254 **/
255void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset)
256{
257 struct i40e_client_instance *cdev;
258
259 if (!vsi)
260 return;
261 mutex_lock(&i40e_client_instance_mutex);
262 list_for_each_entry(cdev, &i40e_client_instances, list) {
263 if (cdev->lan_info.netdev == vsi->netdev) {
264 if (!cdev->client ||
265 !cdev->client->ops || !cdev->client->ops->close) {
266 dev_dbg(&vsi->back->pdev->dev,
267 "Cannot locate client instance close routine\n");
268 continue;
269 }
270 cdev->client->ops->close(&cdev->lan_info, cdev->client,
271 reset);
272 i40e_client_release_qvlist(&cdev->lan_info);
273 }
274 }
275 mutex_unlock(&i40e_client_instance_mutex);
276}
277
278/**
279 * i40e_notify_client_of_vf_reset - call the client vf reset callback
280 * @pf: PF device pointer
 281 * @vf_id: absolute id of VF being reset
282 *
283 * If there is a client attached to this PF, notify when a VF is reset
284 **/
285void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id)
286{
287 struct i40e_client_instance *cdev;
288
289 if (!pf)
290 return;
291 mutex_lock(&i40e_client_instance_mutex);
292 list_for_each_entry(cdev, &i40e_client_instances, list) {
293 if (cdev->lan_info.pf == pf) {
294 if (!cdev->client ||
295 !cdev->client->ops ||
296 !cdev->client->ops->vf_reset) {
297 dev_dbg(&pf->pdev->dev,
298 "Cannot locate client instance VF reset routine\n");
299 continue;
300 }
301 cdev->client->ops->vf_reset(&cdev->lan_info,
302 cdev->client, vf_id);
303 }
304 }
305 mutex_unlock(&i40e_client_instance_mutex);
306}
307
308/**
309 * i40e_notify_client_of_vf_enable - call the client vf notification callback
310 * @pf: PF device pointer
311 * @num_vfs: the number of VFs currently enabled, 0 for disable
312 *
313 * If there is a client attached to this PF, call its VF notification routine
314 **/
315void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs)
316{
317 struct i40e_client_instance *cdev;
318
319 if (!pf)
320 return;
321 mutex_lock(&i40e_client_instance_mutex);
322 list_for_each_entry(cdev, &i40e_client_instances, list) {
323 if (cdev->lan_info.pf == pf) {
324 if (!cdev->client ||
325 !cdev->client->ops ||
326 !cdev->client->ops->vf_enable) {
327 dev_dbg(&pf->pdev->dev,
328 "Cannot locate client instance VF enable routine\n");
329 continue;
330 }
331 cdev->client->ops->vf_enable(&cdev->lan_info,
332 cdev->client, num_vfs);
333 }
334 }
335 mutex_unlock(&i40e_client_instance_mutex);
336}
337
338/**
339 * i40e_vf_client_capable - ask the client if it likes the specified VF
340 * @pf: PF device pointer
341 * @vf_id: the VF in question
342 *
343 * If there is a client of the specified type attached to this PF, call
344 * its vf_capable routine
345 **/
346int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id,
347 enum i40e_client_type type)
348{
349 struct i40e_client_instance *cdev;
350 int capable = false;
351
352 if (!pf)
353 return false;
354 mutex_lock(&i40e_client_instance_mutex);
355 list_for_each_entry(cdev, &i40e_client_instances, list) {
356 if (cdev->lan_info.pf == pf) {
357 if (!cdev->client ||
358 !cdev->client->ops ||
359 !cdev->client->ops->vf_capable ||
360 !(cdev->client->type == type)) {
361 dev_dbg(&pf->pdev->dev,
362 "Cannot locate client instance VF capability routine\n");
363 continue;
364 }
365 capable = cdev->client->ops->vf_capable(&cdev->lan_info,
366 cdev->client,
367 vf_id);
368 break;
369 }
370 }
371 mutex_unlock(&i40e_client_instance_mutex);
372 return capable;
373}
374
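Everything in this file dispatches through cdev->client->ops, so a peer driver (the iWarp client) participates by filling in an ops table whose callback signatures can be read off the call sites above. The sketch below is an assumption: i40e_client.h is not part of this section, so the struct name, its exact fields, and the return types are inferred, and the callback bodies are placeholders.

/* Hypothetical client-side sketch; names and prototypes inferred from the
 * way i40e_client.c invokes the callbacks above, not from i40e_client.h. */
static int my_open(struct i40e_info *ldev, struct i40e_client *client)
{
        /* bring up client resources for this LAN device */
        return 0;
}

static void my_close(struct i40e_info *ldev, struct i40e_client *client,
                     bool reset)
{
        /* tear down; 'reset' means the close is due to a pending reset */
}

static void my_l2_param_change(struct i40e_info *ldev,
                               struct i40e_client *client,
                               struct i40e_params *params)
{
        /* react to the MTU/QoS parameters reported by the LAN driver */
}

static const struct i40e_client_ops my_ops = {
        .open            = my_open,
        .close           = my_close,
        .l2_param_change = my_l2_param_change,
};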
375/**
376 * i40e_vsi_lookup - finds a matching VSI from the PF list starting at start_vsi
377 * @pf: board private structure
378 * @type: vsi type
379 * @start_vsi: a VSI pointer from where to start the search
380 *
381 * Returns non NULL on success or NULL for failure
382 **/
383struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf,
384 enum i40e_vsi_type type,
385 struct i40e_vsi *start_vsi)
386{
387 struct i40e_vsi *vsi;
388 int i = 0;
389
390 if (start_vsi) {
391 for (i = 0; i < pf->num_alloc_vsi; i++) {
392 vsi = pf->vsi[i];
393 if (vsi == start_vsi)
394 break;
395 }
396 }
397 for (; i < pf->num_alloc_vsi; i++) {
398 vsi = pf->vsi[i];
399 if (vsi && vsi->type == type)
400 return vsi;
401 }
402
403 return NULL;
404}
405
406/**
407 * i40e_client_add_instance - add a client instance struct to the instance list
408 * @pf: pointer to the board struct
409 * @client: pointer to a client struct in the client list.
410 *
411 * Returns cdev ptr on success, NULL on failure
412 **/
413static
414struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf,
415 struct i40e_client *client)
416{
417 struct i40e_client_instance *cdev;
418 struct netdev_hw_addr *mac = NULL;
419 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
420
421 mutex_lock(&i40e_client_instance_mutex);
422 list_for_each_entry(cdev, &i40e_client_instances, list) {
423 if ((cdev->lan_info.pf == pf) && (cdev->client == client)) {
424 cdev = NULL;
425 goto out;
426 }
427 }
428 cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
429 if (!cdev)
430 goto out;
431
432 cdev->lan_info.pf = (void *)pf;
433 cdev->lan_info.netdev = vsi->netdev;
434 cdev->lan_info.pcidev = pf->pdev;
435 cdev->lan_info.fid = pf->hw.pf_id;
436 cdev->lan_info.ftype = I40E_CLIENT_FTYPE_PF;
437 cdev->lan_info.hw_addr = pf->hw.hw_addr;
438 cdev->lan_info.ops = &i40e_lan_ops;
439 cdev->lan_info.version.major = I40E_CLIENT_VERSION_MAJOR;
440 cdev->lan_info.version.minor = I40E_CLIENT_VERSION_MINOR;
441 cdev->lan_info.version.build = I40E_CLIENT_VERSION_BUILD;
442 cdev->lan_info.fw_maj_ver = pf->hw.aq.fw_maj_ver;
443 cdev->lan_info.fw_min_ver = pf->hw.aq.fw_min_ver;
444 cdev->lan_info.fw_build = pf->hw.aq.fw_build;
445 set_bit(__I40E_CLIENT_INSTANCE_NONE, &cdev->state);
446
447 if (i40e_client_get_params(vsi, &cdev->lan_info.params)) {
448 kfree(cdev);
449 cdev = NULL;
450 goto out;
451 }
452
453 cdev->lan_info.msix_count = pf->num_iwarp_msix;
454 cdev->lan_info.msix_entries = &pf->msix_entries[pf->iwarp_base_vector];
455
456 mac = list_first_entry(&cdev->lan_info.netdev->dev_addrs.list,
457 struct netdev_hw_addr, list);
458 if (mac)
459 ether_addr_copy(cdev->lan_info.lanmac, mac->addr);
460 else
461 dev_err(&pf->pdev->dev, "MAC address list is empty!\n");
462
463 cdev->client = client;
464 INIT_LIST_HEAD(&cdev->list);
465 list_add(&cdev->list, &i40e_client_instances);
466out:
467 mutex_unlock(&i40e_client_instance_mutex);
468 return cdev;
469}
470
471/**
472 * i40e_client_del_instance - removes a client instance from the list
473 * @pf: pointer to the board struct
474 *
475 * Returns 0 on success or non-0 on error
476 **/
477static
478int i40e_client_del_instance(struct i40e_pf *pf, struct i40e_client *client)
479{
480 struct i40e_client_instance *cdev, *tmp;
481 int ret = -ENODEV;
482
483 mutex_lock(&i40e_client_instance_mutex);
484 list_for_each_entry_safe(cdev, tmp, &i40e_client_instances, list) {
485 if ((cdev->lan_info.pf != pf) || (cdev->client != client))
486 continue;
487
488 dev_info(&pf->pdev->dev, "Deleted instance of Client %s, of dev %d bus=0x%02x func=0x%02x)\n",
489 client->name, pf->hw.pf_id,
490 pf->hw.bus.device, pf->hw.bus.func);
491 list_del(&cdev->list);
492 kfree(cdev);
493 ret = 0;
494 break;
495 }
496 mutex_unlock(&i40e_client_instance_mutex);
497 return ret;
498}
499
500/**
501 * i40e_client_subtask - client maintenance work
502 * @pf: board private structure
503 **/
504void i40e_client_subtask(struct i40e_pf *pf)
505{
506 struct i40e_client_instance *cdev;
507 struct i40e_client *client;
508 int ret = 0;
509
510 if (!(pf->flags & I40E_FLAG_SERVICE_CLIENT_REQUESTED))
511 return;
512 pf->flags &= ~I40E_FLAG_SERVICE_CLIENT_REQUESTED;
513
514 /* If we're down or resetting, just bail */
515 if (test_bit(__I40E_DOWN, &pf->state) ||
516 test_bit(__I40E_CONFIG_BUSY, &pf->state))
517 return;
518
519 /* Check client state and instantiate client if client registered */
520 mutex_lock(&i40e_client_mutex);
521 list_for_each_entry(client, &i40e_clients, list) {
522 /* first check client is registered */
523 if (!test_bit(__I40E_CLIENT_REGISTERED, &client->state))
524 continue;
525
526 /* Do we also need the LAN VSI to be up, to create instance */
527 if (!(client->flags & I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE)) {
528 /* check if L2 VSI is up, if not we are not ready */
529 if (test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
530 continue;
531 }
532
533 /* Add the client instance to the instance list */
534 cdev = i40e_client_add_instance(pf, client);
535 if (!cdev)
536 continue;
537
538 /* Also up the ref_cnt of no. of instances of this client */
539 atomic_inc(&client->ref_cnt);
540 dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x func=0x%02x\n",
541 client->name, pf->hw.pf_id,
542 pf->hw.bus.device, pf->hw.bus.func);
543
544 /* Send an Open request to the client */
545 atomic_inc(&cdev->ref_cnt);
546 if (client->ops && client->ops->open)
547 ret = client->ops->open(&cdev->lan_info, client);
548 atomic_dec(&cdev->ref_cnt);
549 if (!ret) {
550 set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
551 } else {
552 /* remove client instance */
553 i40e_client_del_instance(pf, client);
554 atomic_dec(&client->ref_cnt);
555 continue;
556 }
557 }
558 mutex_unlock(&i40e_client_mutex);
559}
560
561/**
562 * i40e_lan_add_device - add a lan device struct to the list of lan devices
563 * @pf: pointer to the board struct
564 *
 565 * Returns 0 on success or non-0 on error
566 **/
567int i40e_lan_add_device(struct i40e_pf *pf)
568{
569 struct i40e_device *ldev;
570 int ret = 0;
571
572 mutex_lock(&i40e_device_mutex);
573 list_for_each_entry(ldev, &i40e_devices, list) {
574 if (ldev->pf == pf) {
575 ret = -EEXIST;
576 goto out;
577 }
578 }
579 ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
580 if (!ldev) {
581 ret = -ENOMEM;
582 goto out;
583 }
584 ldev->pf = pf;
585 INIT_LIST_HEAD(&ldev->list);
586 list_add(&ldev->list, &i40e_devices);
587 dev_info(&pf->pdev->dev, "Added LAN device PF%d bus=0x%02x func=0x%02x\n",
588 pf->hw.pf_id, pf->hw.bus.device, pf->hw.bus.func);
589
590 /* Since in some cases register may have happened before a device gets
591 * added, we can schedule a subtask to go initiate the clients.
592 */
593 pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
594 i40e_service_event_schedule(pf);
595
596out:
597 mutex_unlock(&i40e_device_mutex);
598 return ret;
599}
600
601/**
602 * i40e_lan_del_device - removes a lan device from the device list
603 * @pf: pointer to the board struct
604 *
605 * Returns 0 on success or non-0 on error
606 **/
607int i40e_lan_del_device(struct i40e_pf *pf)
608{
609 struct i40e_device *ldev, *tmp;
610 int ret = -ENODEV;
611
612 mutex_lock(&i40e_device_mutex);
613 list_for_each_entry_safe(ldev, tmp, &i40e_devices, list) {
614 if (ldev->pf == pf) {
615 dev_info(&pf->pdev->dev, "Deleted LAN device PF%d bus=0x%02x func=0x%02x\n",
616 pf->hw.pf_id, pf->hw.bus.device,
617 pf->hw.bus.func);
618 list_del(&ldev->list);
619 kfree(ldev);
620 ret = 0;
621 break;
622 }
623 }
624
625 mutex_unlock(&i40e_device_mutex);
626 return ret;
627}
628
629/**
630 * i40e_client_release - release client specific resources
631 * @client: pointer to the registered client
632 *
633 * Return 0 on success or non-zero on error
634 **/
635static int i40e_client_release(struct i40e_client *client)
636{
637 struct i40e_client_instance *cdev, *tmp;
638 struct i40e_pf *pf = NULL;
639 int ret = 0;
640
641 LIST_HEAD(cdevs_tmp);
642
643 mutex_lock(&i40e_client_instance_mutex);
644 list_for_each_entry_safe(cdev, tmp, &i40e_client_instances, list) {
645 if (strncmp(cdev->client->name, client->name,
646 I40E_CLIENT_STR_LENGTH))
647 continue;
648 if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
649 if (atomic_read(&cdev->ref_cnt) > 0) {
650 ret = I40E_ERR_NOT_READY;
651 goto out;
652 }
653 pf = (struct i40e_pf *)cdev->lan_info.pf;
654 if (client->ops && client->ops->close)
655 client->ops->close(&cdev->lan_info, client,
656 false);
657 i40e_client_release_qvlist(&cdev->lan_info);
658 clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
659
660 dev_warn(&pf->pdev->dev,
661 "Client %s instance for PF id %d closed\n",
662 client->name, pf->hw.pf_id);
663 }
664 /* delete the client instance from the list */
665 list_del(&cdev->list);
666 list_add(&cdev->list, &cdevs_tmp);
667 atomic_dec(&client->ref_cnt);
668		dev_info(&cdev->lan_info.pcidev->dev, "Deleted client instance of Client %s\n",
669			 client->name);
670 }
671out:
672 mutex_unlock(&i40e_client_instance_mutex);
673
674 /* free the client device and release its vsi */
675 list_for_each_entry_safe(cdev, tmp, &cdevs_tmp, list) {
676 kfree(cdev);
677 }
678 return ret;
679}
680
681/**
682 * i40e_client_prepare - prepare client specific resources
683 * @client: pointer to the registered client
684 *
685 * Return 0 on success or < 0 on error
686 **/
687static int i40e_client_prepare(struct i40e_client *client)
688{
689 struct i40e_device *ldev;
690 struct i40e_pf *pf;
691 int ret = 0;
692
693 mutex_lock(&i40e_device_mutex);
694 list_for_each_entry(ldev, &i40e_devices, list) {
695 pf = ldev->pf;
696 /* Start the client subtask */
697 pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
698 i40e_service_event_schedule(pf);
699 }
700 mutex_unlock(&i40e_device_mutex);
701 return ret;
702}
703
704/**
705 * i40e_client_virtchnl_send - send a message to a VF on behalf of the client
706 * @ldev: pointer to L2 context
707 * @client: Client pointer
708 * @vf_id: absolute VF identifier
709 * @msg: message buffer
710 * @len: length of message buffer
711 *
712 * Return 0 on success or < 0 on error
713 **/
714static int i40e_client_virtchnl_send(struct i40e_info *ldev,
715 struct i40e_client *client,
716 u32 vf_id, u8 *msg, u16 len)
717{
718 struct i40e_pf *pf = ldev->pf;
719 struct i40e_hw *hw = &pf->hw;
720 i40e_status err;
721
722 err = i40e_aq_send_msg_to_vf(hw, vf_id, I40E_VIRTCHNL_OP_IWARP,
723 0, msg, len, NULL);
724 if (err)
725 dev_err(&pf->pdev->dev, "Unable to send iWarp message to VF, error %d, aq status %d\n",
726 err, hw->aq.asq_last_status);
727
728 return err;
729}
730
731/**
732 * i40e_client_setup_qvlist - set up interrupt mapping for the client's queue-vector list
733 * @ldev: pointer to L2 context.
734 * @client: Client pointer.
735 * @qvlist_info: queue and vector list
736 *
737 * Return 0 on success or < 0 on error
738 **/
739static int i40e_client_setup_qvlist(struct i40e_info *ldev,
740 struct i40e_client *client,
741 struct i40e_qvlist_info *qvlist_info)
742{
743 struct i40e_pf *pf = ldev->pf;
744 struct i40e_hw *hw = &pf->hw;
745 struct i40e_qv_info *qv_info;
746 u32 v_idx, i, reg_idx, reg;
747 u32 size;
748
749 size = sizeof(struct i40e_qvlist_info) +
750 (sizeof(struct i40e_qv_info) * (qvlist_info->num_vectors - 1));
751	ldev->qvlist_info = kzalloc(size, GFP_KERNEL);
	if (!ldev->qvlist_info)
		return -ENOMEM;
752	ldev->qvlist_info->num_vectors = qvlist_info->num_vectors;
753
754 for (i = 0; i < qvlist_info->num_vectors; i++) {
755 qv_info = &qvlist_info->qv_info[i];
756 if (!qv_info)
757 continue;
758 v_idx = qv_info->v_idx;
759
760 /* Validate vector id belongs to this client */
761 if ((v_idx >= (pf->iwarp_base_vector + pf->num_iwarp_msix)) ||
762 (v_idx < pf->iwarp_base_vector))
763 goto err;
764
765 ldev->qvlist_info->qv_info[i] = *qv_info;
766 reg_idx = I40E_PFINT_LNKLSTN(v_idx - 1);
767
768 if (qv_info->ceq_idx == I40E_QUEUE_INVALID_IDX) {
769 /* Special case - No CEQ mapped on this vector */
770 wr32(hw, reg_idx, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
771 } else {
772 reg = (qv_info->ceq_idx &
773 I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) |
774 (I40E_QUEUE_TYPE_PE_CEQ <<
775 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
776 wr32(hw, reg_idx, reg);
777
778 reg = (I40E_PFINT_CEQCTL_CAUSE_ENA_MASK |
779 (v_idx << I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT) |
780 (qv_info->itr_idx <<
781 I40E_PFINT_CEQCTL_ITR_INDX_SHIFT) |
782 (I40E_QUEUE_END_OF_LIST <<
783 I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT));
784 wr32(hw, I40E_PFINT_CEQCTL(qv_info->ceq_idx), reg);
785 }
786 if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
787 reg = (I40E_PFINT_AEQCTL_CAUSE_ENA_MASK |
788 (v_idx << I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT) |
789 (qv_info->itr_idx <<
790 I40E_PFINT_AEQCTL_ITR_INDX_SHIFT));
791
792 wr32(hw, I40E_PFINT_AEQCTL, reg);
793 }
794 }
795
796 return 0;
797err:
798 kfree(ldev->qvlist_info);
799 ldev->qvlist_info = NULL;
800 return -EINVAL;
801}
802
803/**
804 * i40e_client_request_reset - handle a reset request from the client
805 * @ldev: pointer to L2 context.
806 * @client: Client pointer.
807 * @reset_level: requested reset level
808 **/
809static void i40e_client_request_reset(struct i40e_info *ldev,
810 struct i40e_client *client,
811 u32 reset_level)
812{
813 struct i40e_pf *pf = ldev->pf;
814
815 switch (reset_level) {
816 case I40E_CLIENT_RESET_LEVEL_PF:
817 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
818 break;
819 case I40E_CLIENT_RESET_LEVEL_CORE:
820 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
821 break;
822 default:
823 dev_warn(&pf->pdev->dev,
824			 "Client %s instance for PF id %d requested an unsupported reset: %d.\n",
825 client->name, pf->hw.pf_id, reset_level);
826 break;
827 }
828
829 i40e_service_event_schedule(pf);
830}
831
832/**
833 * i40e_client_update_vsi_ctxt - update the VSI context on behalf of the client
834 * @ldev: pointer to L2 context.
835 * @client: Client pointer.
836 * @is_vf: true if this update is for a VF
837 * @vf_id: carries the VF id when is_vf is true
838 * @flag: any device-level setting that needs to be applied for the PE
839 * @valid_flag: bitmask selecting which bits of @flag may be changed
840 *
841 * Return 0 on success or < 0 on error
842 **/
843static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
844 struct i40e_client *client,
845 bool is_vf, u32 vf_id,
846 u32 flag, u32 valid_flag)
847{
848 struct i40e_pf *pf = ldev->pf;
849 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
850 struct i40e_vsi_context ctxt;
851 bool update = true;
852 i40e_status err;
853
854	/* TODO: for now do not allow modifying a VF's VSI settings */
855 if (is_vf)
856 return -EINVAL;
857
858 ctxt.seid = pf->main_vsi_seid;
859 ctxt.pf_num = pf->hw.pf_id;
860 err = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
861 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
862 if (err) {
863 dev_info(&pf->pdev->dev,
864 "couldn't get PF vsi config, err %s aq_err %s\n",
865 i40e_stat_str(&pf->hw, err),
866 i40e_aq_str(&pf->hw,
867 pf->hw.aq.asq_last_status));
868 return -ENOENT;
869 }
870
871 if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE) &&
872 (flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE)) {
873 ctxt.info.valid_sections =
874 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
875 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
876 } else if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE) &&
877 !(flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE)) {
878 ctxt.info.valid_sections =
879 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
880 ctxt.info.queueing_opt_flags &= ~I40E_AQ_VSI_QUE_OPT_TCP_ENA;
881 } else {
882 update = false;
883 dev_warn(&pf->pdev->dev,
884			 "Client %s instance for PF id %d requested an unsupported config: %x.\n",
885 client->name, pf->hw.pf_id, flag);
886 }
887
888 if (update) {
889 err = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
890 if (err) {
891 dev_info(&pf->pdev->dev,
892 "update VSI ctxt for PE failed, err %s aq_err %s\n",
893 i40e_stat_str(&pf->hw, err),
894 i40e_aq_str(&pf->hw,
895 pf->hw.aq.asq_last_status));
896 }
897 }
898 return err;
899}
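
/* A minimal sketch of how a client might use the update_vsi_ctxt op (exported
 * through struct i40e_ops in i40e_client.h) to enable TCP queueing for the PE
 * engine on the main, non-VF VSI; the err variable is illustrative only:
 *
 *	err = ldev->ops->update_vsi_ctxt(ldev, client, false, 0,
 *				I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE,
 *				I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE);
 */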
900
901/**
902 * i40e_register_client - Register an i40e client driver with the L2 driver
903 * @client: pointer to the i40e_client struct
904 *
905 * Returns 0 on success or non-0 on error
906 **/
907int i40e_register_client(struct i40e_client *client)
908{
909 int ret = 0;
910 enum i40e_vsi_type vsi_type;
911
912 if (!client) {
913 ret = -EIO;
914 goto out;
915 }
916
917 if (strlen(client->name) == 0) {
918 pr_info("i40e: Failed to register client with no name\n");
919 ret = -EIO;
920 goto out;
921 }
922
923 mutex_lock(&i40e_client_mutex);
924 if (i40e_client_is_registered(client)) {
925 pr_info("i40e: Client %s has already been registered!\n",
926 client->name);
927 mutex_unlock(&i40e_client_mutex);
928 ret = -EEXIST;
929 goto out;
930 }
931
932 if ((client->version.major != I40E_CLIENT_VERSION_MAJOR) ||
933 (client->version.minor != I40E_CLIENT_VERSION_MINOR)) {
934 pr_info("i40e: Failed to register client %s due to mismatched client interface version\n",
935 client->name);
936 pr_info("Client is using version: %02d.%02d.%02d while LAN driver supports %s\n",
937 client->version.major, client->version.minor,
938 client->version.build,
939 i40e_client_interface_version_str);
940 mutex_unlock(&i40e_client_mutex);
941 ret = -EIO;
942 goto out;
943 }
944
945 vsi_type = i40e_client_type_to_vsi_type(client->type);
946 if (vsi_type == I40E_VSI_TYPE_UNKNOWN) {
947 pr_info("i40e: Failed to register client %s due to unknown client type %d\n",
948 client->name, client->type);
949 mutex_unlock(&i40e_client_mutex);
950 ret = -EIO;
951 goto out;
952 }
953 list_add(&client->list, &i40e_clients);
954 set_bit(__I40E_CLIENT_REGISTERED, &client->state);
955 mutex_unlock(&i40e_client_mutex);
956
957 if (i40e_client_prepare(client)) {
958 ret = -EIO;
959 goto out;
960 }
961
962 pr_info("i40e: Registered client %s with return code %d\n",
963 client->name, ret);
964out:
965 return ret;
966}
967EXPORT_SYMBOL(i40e_register_client);
968
969/**
970 * i40e_unregister_client - Unregister an i40e client driver from the L2 driver
971 * @client: pointer to the i40e_client struct
972 *
973 * Returns 0 on success or non-0 on error
974 **/
975int i40e_unregister_client(struct i40e_client *client)
976{
977 int ret = 0;
978
979	/* When an unregister request comes through, a close must be sent to
980	 * each of the client instances that were opened.
981	 * i40e_client_release() is called to handle this.
982	 */
983 if (!client || i40e_client_release(client)) {
984 ret = -EIO;
985 goto out;
986 }
987
988 /* TODO: check if device is in reset, or if that matters? */
989 mutex_lock(&i40e_client_mutex);
990 if (!i40e_client_is_registered(client)) {
991 pr_info("i40e: Client %s has not been registered\n",
992 client->name);
993 mutex_unlock(&i40e_client_mutex);
994 ret = -ENODEV;
995 goto out;
996 }
997 if (atomic_read(&client->ref_cnt) == 0) {
998 clear_bit(__I40E_CLIENT_REGISTERED, &client->state);
999 list_del(&client->list);
1000 pr_info("i40e: Unregistered client %s with return code %d\n",
1001 client->name, ret);
1002 } else {
1003 ret = I40E_ERR_NOT_READY;
1004 pr_err("i40e: Client %s failed unregister - client has open instances\n",
1005 client->name);
1006 }
1007
1008 mutex_unlock(&i40e_client_mutex);
1009out:
1010 return ret;
1011}
1012EXPORT_SYMBOL(i40e_unregister_client);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/drivers/net/ethernet/intel/i40e/i40e_client.h
new file mode 100644
index 000000000000..bf6b453d93a1
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.h
@@ -0,0 +1,232 @@
1/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 - 2015 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
18 * The full GNU General Public License is included in this distribution in
19 * the file called "COPYING".
20 *
21 * Contact Information:
22 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 ******************************************************************************/
26
27#ifndef _I40E_CLIENT_H_
28#define _I40E_CLIENT_H_
29
30#define I40E_CLIENT_STR_LENGTH 10
31
32/* Client interface version should be updated anytime there is a change in the
33 * existing APIs or data structures.
34 */
35#define I40E_CLIENT_VERSION_MAJOR 0
36#define I40E_CLIENT_VERSION_MINOR 01
37#define I40E_CLIENT_VERSION_BUILD 00
38#define I40E_CLIENT_VERSION_STR \
39 XSTRINGIFY(I40E_CLIENT_VERSION_MAJOR) "." \
40 XSTRINGIFY(I40E_CLIENT_VERSION_MINOR) "." \
41 XSTRINGIFY(I40E_CLIENT_VERSION_BUILD)
42
43struct i40e_client_version {
44 u8 major;
45 u8 minor;
46 u8 build;
47 u8 rsvd;
48};
49
50enum i40e_client_state {
51 __I40E_CLIENT_NULL,
52 __I40E_CLIENT_REGISTERED
53};
54
55enum i40e_client_instance_state {
56 __I40E_CLIENT_INSTANCE_NONE,
57 __I40E_CLIENT_INSTANCE_OPENED,
58};
59
60enum i40e_client_type {
61 I40E_CLIENT_IWARP,
62 I40E_CLIENT_VMDQ2
63};
64
65struct i40e_ops;
66struct i40e_client;
67
68/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
69 * In order for us to keep the interface simple, SW will define a
70 * unique type value for AEQ.
71 */
72#define I40E_QUEUE_TYPE_PE_AEQ 0x80
73#define I40E_QUEUE_INVALID_IDX 0xFFFF
74
75struct i40e_qv_info {
76 u32 v_idx; /* msix_vector */
77 u16 ceq_idx;
78 u16 aeq_idx;
79 u8 itr_idx;
80};
81
82struct i40e_qvlist_info {
83 u32 num_vectors;
84 struct i40e_qv_info qv_info[1];
85};
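
/* A minimal sketch of how a client might size and allocate a qvlist for n
 * vectors, given the one-element qv_info[] trailing array above (this is the
 * same arithmetic i40e_client_setup_qvlist() uses); the variable names are
 * illustrative only:
 *
 *	qvlist = kzalloc(sizeof(struct i40e_qvlist_info) +
 *			 (n - 1) * sizeof(struct i40e_qv_info), GFP_KERNEL);
 *	if (!qvlist)
 *		return -ENOMEM;
 *	qvlist->num_vectors = n;
 */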
86
87#define I40E_CLIENT_MSIX_ALL 0xFFFFFFFF
88
89/* set of LAN parameters useful for clients managed by LAN */
90
91/* Struct to hold per priority info */
92struct i40e_prio_qos_params {
93 u16 qs_handle; /* qs handle for prio */
94 u8 tc; /* TC mapped to prio */
95 u8 reserved;
96};
97
98#define I40E_CLIENT_MAX_USER_PRIORITY 8
99/* Struct to hold Client QoS */
100struct i40e_qos_params {
101 struct i40e_prio_qos_params prio_qos[I40E_CLIENT_MAX_USER_PRIORITY];
102};
103
104struct i40e_params {
105 struct i40e_qos_params qos;
106 u16 mtu;
107};
108
109/* Structure to hold Lan device info for a client device */
110struct i40e_info {
111 struct i40e_client_version version;
112 u8 lanmac[6];
113 struct net_device *netdev;
114 struct pci_dev *pcidev;
115 u8 __iomem *hw_addr;
116 u8 fid; /* function id, PF id or VF id */
117#define I40E_CLIENT_FTYPE_PF 0
118#define I40E_CLIENT_FTYPE_VF 1
119 u8 ftype; /* function type, PF or VF */
120 void *pf;
121
122	/* All L2 params that could change during the life span of the PF
123	 * and need to be communicated to the client when they change
124	 */
125 struct i40e_qvlist_info *qvlist_info;
126 struct i40e_params params;
127 struct i40e_ops *ops;
128
129	u16 msix_count;	 /* number of MSI-X vectors */
130	/* The array below is dynamically allocated based on msix_count */
131	struct msix_entry *msix_entries;
132	u16 itr_index;	 /* Which ITR index the PE driver is supposed to use */
133 u16 fw_maj_ver; /* firmware major version */
134 u16 fw_min_ver; /* firmware minor version */
135 u32 fw_build; /* firmware build number */
136};
137
138#define I40E_CLIENT_RESET_LEVEL_PF 1
139#define I40E_CLIENT_RESET_LEVEL_CORE 2
140#define I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE BIT(1)
141
142struct i40e_ops {
143	/* setup_qvlist enables queues with a particular vector */
144 int (*setup_qvlist)(struct i40e_info *ldev, struct i40e_client *client,
145 struct i40e_qvlist_info *qv_info);
146
147 int (*virtchnl_send)(struct i40e_info *ldev, struct i40e_client *client,
148 u32 vf_id, u8 *msg, u16 len);
149
150	/* If the PE Engine is unresponsive, the RDMA driver can request a reset.
151	 * The level indicates how severe a reset is being requested.
152	 */
153 void (*request_reset)(struct i40e_info *ldev,
154 struct i40e_client *client, u32 level);
155
156	/* API for the RDMA driver to set certain VSI flags that control
157	 * the PE Engine.
158	 */
159 int (*update_vsi_ctxt)(struct i40e_info *ldev,
160 struct i40e_client *client,
161 bool is_vf, u32 vf_id,
162 u32 flag, u32 valid_flag);
163};
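
/* A minimal sketch, assuming the client keeps the struct i40e_info *ldev it
 * was handed in its open() callback, of how these ops might be invoked from
 * the client side; the variable names are illustrative only:
 *
 *	ret = ldev->ops->setup_qvlist(ldev, client, qvlist);
 *
 * and, if the PE engine later stops responding:
 *
 *	ldev->ops->request_reset(ldev, client, I40E_CLIENT_RESET_LEVEL_PF);
 */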
164
165struct i40e_client_ops {
166 /* Should be called from register_client() or whenever PF is ready
167 * to create a specific client instance.
168 */
169 int (*open)(struct i40e_info *ldev, struct i40e_client *client);
170
171 /* Should be called when netdev is unavailable or when unregister
172	 * call comes in. If the close is happening due to a reset being
173	 * triggered, set the reset argument to true.
174 */
175 void (*close)(struct i40e_info *ldev, struct i40e_client *client,
176 bool reset);
177
178	/* called when an L2-managed parameter changes, e.g. the MTU */
179 void (*l2_param_change)(struct i40e_info *ldev,
180 struct i40e_client *client,
181 struct i40e_params *params);
182
183 int (*virtchnl_receive)(struct i40e_info *ldev,
184 struct i40e_client *client, u32 vf_id,
185 u8 *msg, u16 len);
186
187 /* called when a VF is reset by the PF */
188 void (*vf_reset)(struct i40e_info *ldev,
189 struct i40e_client *client, u32 vf_id);
190
191 /* called when the number of VFs changes */
192 void (*vf_enable)(struct i40e_info *ldev,
193 struct i40e_client *client, u32 num_vfs);
194
195 /* returns true if VF is capable of specified offload */
196 int (*vf_capable)(struct i40e_info *ldev,
197 struct i40e_client *client, u32 vf_id);
198};
199
200/* Client device */
201struct i40e_client_instance {
202 struct list_head list;
203 struct i40e_info lan_info;
204 struct i40e_client *client;
205 unsigned long state;
206 /* A count of all the in-progress calls to the client */
207 atomic_t ref_cnt;
208};
209
210struct i40e_client {
211 struct list_head list; /* list of registered clients */
212 char name[I40E_CLIENT_STR_LENGTH];
213 struct i40e_client_version version;
214 unsigned long state; /* client state */
215 atomic_t ref_cnt; /* Count of all the client devices of this kind */
216 u32 flags;
217#define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0)
218#define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2)
219 enum i40e_client_type type;
220 struct i40e_client_ops *ops; /* client ops provided by the client */
221};
222
223static inline bool i40e_client_is_registered(struct i40e_client *client)
224{
225 return test_bit(__I40E_CLIENT_REGISTERED, &client->state);
226}
227
228/* used by clients */
229int i40e_register_client(struct i40e_client *client);
230int i40e_unregister_client(struct i40e_client *client);
231
232#endif /* _I40E_CLIENT_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 4596294c2ab1..f3c1d8890cbb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -60,6 +60,7 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
60 case I40E_DEV_ID_SFP_X722: 60 case I40E_DEV_ID_SFP_X722:
61 case I40E_DEV_ID_1G_BASE_T_X722: 61 case I40E_DEV_ID_1G_BASE_T_X722:
62 case I40E_DEV_ID_10G_BASE_T_X722: 62 case I40E_DEV_ID_10G_BASE_T_X722:
63 case I40E_DEV_ID_SFP_I_X722:
63 hw->mac.type = I40E_MAC_X722; 64 hw->mac.type = I40E_MAC_X722;
64 break; 65 break;
65 default: 66 default:
@@ -1901,13 +1902,13 @@ i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
1901 * 1902 *
1902 * Reset the external PHY. 1903 * Reset the external PHY.
1903 **/ 1904 **/
1904enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, 1905i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
1905 struct i40e_asq_cmd_details *cmd_details) 1906 struct i40e_asq_cmd_details *cmd_details)
1906{ 1907{
1907 struct i40e_aq_desc desc; 1908 struct i40e_aq_desc desc;
1908 struct i40e_aqc_set_phy_debug *cmd = 1909 struct i40e_aqc_set_phy_debug *cmd =
1909 (struct i40e_aqc_set_phy_debug *)&desc.params.raw; 1910 (struct i40e_aqc_set_phy_debug *)&desc.params.raw;
1910 enum i40e_status_code status; 1911 i40e_status status;
1911 1912
1912 i40e_fill_default_direct_cmd_desc(&desc, 1913 i40e_fill_default_direct_cmd_desc(&desc,
1913 i40e_aqc_opc_set_phy_debug); 1914 i40e_aqc_opc_set_phy_debug);
@@ -2157,6 +2158,9 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
2157 struct i40e_aq_desc desc; 2158 struct i40e_aq_desc desc;
2158 struct i40e_aqc_add_get_update_vsi *cmd = 2159 struct i40e_aqc_add_get_update_vsi *cmd =
2159 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; 2160 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
2161 struct i40e_aqc_add_get_update_vsi_completion *resp =
2162 (struct i40e_aqc_add_get_update_vsi_completion *)
2163 &desc.params.raw;
2160 i40e_status status; 2164 i40e_status status;
2161 2165
2162 i40e_fill_default_direct_cmd_desc(&desc, 2166 i40e_fill_default_direct_cmd_desc(&desc,
@@ -2168,6 +2172,9 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
2168 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, 2172 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
2169 sizeof(vsi_ctx->info), cmd_details); 2173 sizeof(vsi_ctx->info), cmd_details);
2170 2174
2175 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
2176 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
2177
2171 return status; 2178 return status;
2172} 2179}
2173 2180
@@ -3073,6 +3080,9 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
3073 break; 3080 break;
3074 case I40E_AQ_CAP_ID_MSIX: 3081 case I40E_AQ_CAP_ID_MSIX:
3075 p->num_msix_vectors = number; 3082 p->num_msix_vectors = number;
3083 i40e_debug(hw, I40E_DEBUG_INIT,
3084 "HW Capability: MSIX vector count = %d\n",
3085 p->num_msix_vectors);
3076 break; 3086 break;
3077 case I40E_AQ_CAP_ID_VF_MSIX: 3087 case I40E_AQ_CAP_ID_VF_MSIX:
3078 p->num_msix_vectors_vf = number; 3088 p->num_msix_vectors_vf = number;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 0c97733d253c..83dccf1792e7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -147,9 +147,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
147 dev_info(&pf->pdev->dev, " vlan_features = 0x%08lx\n", 147 dev_info(&pf->pdev->dev, " vlan_features = 0x%08lx\n",
148 (unsigned long int)nd->vlan_features); 148 (unsigned long int)nd->vlan_features);
149 } 149 }
150 if (vsi->active_vlans) 150 dev_info(&pf->pdev->dev,
151 dev_info(&pf->pdev->dev, 151 " vlgrp: & = %p\n", vsi->active_vlans);
152 " vlgrp: & = %p\n", vsi->active_vlans);
153 dev_info(&pf->pdev->dev, 152 dev_info(&pf->pdev->dev,
154 " state = %li flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n", 153 " state = %li flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n",
155 vsi->state, vsi->flags, 154 vsi->state, vsi->flags,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h
index 99257fcd1ef4..dd4457d29e98 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h
@@ -44,6 +44,7 @@
44#define I40E_DEV_ID_SFP_X722 0x37D0 44#define I40E_DEV_ID_SFP_X722 0x37D0
45#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 45#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
46#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 46#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
47#define I40E_DEV_ID_SFP_I_X722 0x37D3
47 48
48#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ 49#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
49 (d) == I40E_DEV_ID_QSFP_B || \ 50 (d) == I40E_DEV_ID_QSFP_B || \
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 784b1659457a..8a83d4514812 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -313,6 +313,13 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
313 ecmd->advertising |= ADVERTISED_10000baseT_Full; 313 ecmd->advertising |= ADVERTISED_10000baseT_Full;
314 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) 314 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
315 ecmd->advertising |= ADVERTISED_1000baseT_Full; 315 ecmd->advertising |= ADVERTISED_1000baseT_Full;
316 /* adding 100baseT support for 10GBASET_PHY */
317 if (pf->flags & I40E_FLAG_HAVE_10GBASET_PHY) {
318 ecmd->supported |= SUPPORTED_100baseT_Full;
319 ecmd->advertising |= ADVERTISED_100baseT_Full |
320 ADVERTISED_1000baseT_Full |
321 ADVERTISED_10000baseT_Full;
322 }
316 break; 323 break;
317 case I40E_PHY_TYPE_1000BASE_T_OPTICAL: 324 case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
318 ecmd->supported = SUPPORTED_Autoneg | 325 ecmd->supported = SUPPORTED_Autoneg |
@@ -325,6 +332,15 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
325 SUPPORTED_100baseT_Full; 332 SUPPORTED_100baseT_Full;
326 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) 333 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
327 ecmd->advertising |= ADVERTISED_100baseT_Full; 334 ecmd->advertising |= ADVERTISED_100baseT_Full;
335 /* firmware detects 10G phy as 100M phy at 100M speed */
336 if (pf->flags & I40E_FLAG_HAVE_10GBASET_PHY) {
337 ecmd->supported |= SUPPORTED_10000baseT_Full |
338 SUPPORTED_1000baseT_Full;
339 ecmd->advertising |= ADVERTISED_Autoneg |
340 ADVERTISED_100baseT_Full |
341 ADVERTISED_1000baseT_Full |
342 ADVERTISED_10000baseT_Full;
343 }
328 break; 344 break;
329 case I40E_PHY_TYPE_10GBASE_CR1_CU: 345 case I40E_PHY_TYPE_10GBASE_CR1_CU:
330 case I40E_PHY_TYPE_10GBASE_CR1: 346 case I40E_PHY_TYPE_10GBASE_CR1:
@@ -1714,7 +1730,7 @@ static void i40e_diag_test(struct net_device *netdev,
1714 /* If the device is online then take it offline */ 1730 /* If the device is online then take it offline */
1715 if (if_running) 1731 if (if_running)
1716 /* indicate we're in test mode */ 1732 /* indicate we're in test mode */
1717 dev_close(netdev); 1733 i40e_close(netdev);
1718 else 1734 else
1719 /* This reset does not affect link - if it is 1735 /* This reset does not affect link - if it is
1720 * changed to a type of reset that does affect 1736 * changed to a type of reset that does affect
@@ -1743,7 +1759,7 @@ static void i40e_diag_test(struct net_device *netdev,
1743 i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED)); 1759 i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
1744 1760
1745 if (if_running) 1761 if (if_running)
1746 dev_open(netdev); 1762 i40e_open(netdev);
1747 } else { 1763 } else {
1748 /* Online tests */ 1764 /* Online tests */
1749 netif_info(pf, drv, netdev, "online testing starting\n"); 1765 netif_info(pf, drv, netdev, "online testing starting\n");
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
index 8ad162c16f61..58e6c1570335 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver 3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 - 2015 Intel Corporation. 4 * Copyright(c) 2013 - 2016 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -38,16 +38,6 @@
38#include "i40e_fcoe.h" 38#include "i40e_fcoe.h"
39 39
40/** 40/**
41 * i40e_rx_is_fcoe - returns true if the rx packet type is FCoE
42 * @ptype: the packet type field from rx descriptor write-back
43 **/
44static inline bool i40e_rx_is_fcoe(u16 ptype)
45{
46 return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
47 (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
48}
49
50/**
51 * i40e_fcoe_sof_is_class2 - returns true if this is a FC Class 2 SOF 41 * i40e_fcoe_sof_is_class2 - returns true if this is a FC Class 2 SOF
52 * @sof: the FCoE start of frame delimiter 42 * @sof: the FCoE start of frame delimiter
53 **/ 43 **/
@@ -1371,7 +1361,7 @@ static netdev_tx_t i40e_fcoe_xmit_frame(struct sk_buff *skb,
1371 if (i40e_chk_linearize(skb, count)) { 1361 if (i40e_chk_linearize(skb, count)) {
1372 if (__skb_linearize(skb)) 1362 if (__skb_linearize(skb))
1373 goto out_drop; 1363 goto out_drop;
1374 count = TXD_USE_COUNT(skb->len); 1364 count = i40e_txd_use_count(skb->len);
1375 tx_ring->tx_stats.tx_linearize++; 1365 tx_ring->tx_stats.tx_linearize++;
1376 } 1366 }
1377 1367
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 70d9605a0d9e..0b071cea305d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -45,8 +45,8 @@ static const char i40e_driver_string[] =
45#define DRV_KERN "-k" 45#define DRV_KERN "-k"
46 46
47#define DRV_VERSION_MAJOR 1 47#define DRV_VERSION_MAJOR 1
48#define DRV_VERSION_MINOR 4 48#define DRV_VERSION_MINOR 5
49#define DRV_VERSION_BUILD 25 49#define DRV_VERSION_BUILD 5
50#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ 50#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
51 __stringify(DRV_VERSION_MINOR) "." \ 51 __stringify(DRV_VERSION_MINOR) "." \
52 __stringify(DRV_VERSION_BUILD) DRV_KERN 52 __stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -90,6 +90,7 @@ static const struct pci_device_id i40e_pci_tbl[] = {
90 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0}, 90 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
91 {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0}, 91 {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
92 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0}, 92 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
93 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
93 {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0}, 94 {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
94 {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0}, 95 {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
95 /* required last entry */ 96 /* required last entry */
@@ -289,7 +290,7 @@ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
289 * 290 *
290 * If not already scheduled, this puts the task into the work queue 291 * If not already scheduled, this puts the task into the work queue
291 **/ 292 **/
292static void i40e_service_event_schedule(struct i40e_pf *pf) 293void i40e_service_event_schedule(struct i40e_pf *pf)
293{ 294{
294 if (!test_bit(__I40E_DOWN, &pf->state) && 295 if (!test_bit(__I40E_DOWN, &pf->state) &&
295 !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) && 296 !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
@@ -2230,7 +2231,7 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
2230 netdev->mtu = new_mtu; 2231 netdev->mtu = new_mtu;
2231 if (netif_running(netdev)) 2232 if (netif_running(netdev))
2232 i40e_vsi_reinit_locked(vsi); 2233 i40e_vsi_reinit_locked(vsi);
2233 2234 i40e_notify_client_of_l2_param_changes(vsi);
2234 return 0; 2235 return 0;
2235} 2236}
2236 2237
@@ -4164,11 +4165,14 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
4164 int i; 4165 int i;
4165 4166
4166 i40e_stop_misc_vector(pf); 4167 i40e_stop_misc_vector(pf);
4167 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 4168 if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
4168 synchronize_irq(pf->msix_entries[0].vector); 4169 synchronize_irq(pf->msix_entries[0].vector);
4169 free_irq(pf->msix_entries[0].vector, pf); 4170 free_irq(pf->msix_entries[0].vector, pf);
4170 } 4171 }
4171 4172
4173 i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
4174 I40E_IWARP_IRQ_PILE_ID);
4175
4172 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); 4176 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
4173 for (i = 0; i < pf->num_alloc_vsi; i++) 4177 for (i = 0; i < pf->num_alloc_vsi; i++)
4174 if (pf->vsi[i]) 4178 if (pf->vsi[i])
@@ -4212,12 +4216,17 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
4212 **/ 4216 **/
4213static void i40e_vsi_close(struct i40e_vsi *vsi) 4217static void i40e_vsi_close(struct i40e_vsi *vsi)
4214{ 4218{
4219 bool reset = false;
4220
4215 if (!test_and_set_bit(__I40E_DOWN, &vsi->state)) 4221 if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
4216 i40e_down(vsi); 4222 i40e_down(vsi);
4217 i40e_vsi_free_irq(vsi); 4223 i40e_vsi_free_irq(vsi);
4218 i40e_vsi_free_tx_resources(vsi); 4224 i40e_vsi_free_tx_resources(vsi);
4219 i40e_vsi_free_rx_resources(vsi); 4225 i40e_vsi_free_rx_resources(vsi);
4220 vsi->current_netdev_flags = 0; 4226 vsi->current_netdev_flags = 0;
4227 if (test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
4228 reset = true;
4229 i40e_notify_client_of_netdev_close(vsi, reset);
4221} 4230}
4222 4231
4223/** 4232/**
@@ -4850,6 +4859,12 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4850 ctxt.info = vsi->info; 4859 ctxt.info = vsi->info;
4851 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false); 4860 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
4852 4861
4862 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
4863 ctxt.info.valid_sections |=
4864 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
4865 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
4866 }
4867
4853 /* Update the VSI after updating the VSI queue-mapping information */ 4868 /* Update the VSI after updating the VSI queue-mapping information */
4854 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 4869 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
4855 if (ret) { 4870 if (ret) {
@@ -4993,6 +5008,7 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
4993 if (pf->vsi[v]->netdev) 5008 if (pf->vsi[v]->netdev)
4994 i40e_dcbnl_set_all(pf->vsi[v]); 5009 i40e_dcbnl_set_all(pf->vsi[v]);
4995 } 5010 }
5011 i40e_notify_client_of_l2_param_changes(pf->vsi[v]);
4996 } 5012 }
4997} 5013}
4998 5014
@@ -5191,6 +5207,11 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
5191 } 5207 }
5192 i40e_fdir_filter_restore(vsi); 5208 i40e_fdir_filter_restore(vsi);
5193 } 5209 }
5210
5211 /* On the next run of the service_task, notify any clients of the new
5212 * opened netdev
5213 */
5214 pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
5194 i40e_service_event_schedule(pf); 5215 i40e_service_event_schedule(pf);
5195 5216
5196 return 0; 5217 return 0;
@@ -5379,6 +5400,8 @@ int i40e_open(struct net_device *netdev)
5379 geneve_get_rx_port(netdev); 5400 geneve_get_rx_port(netdev);
5380#endif 5401#endif
5381 5402
5403 i40e_notify_client_of_netdev_open(vsi);
5404
5382 return 0; 5405 return 0;
5383} 5406}
5384 5407
@@ -5487,11 +5510,7 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf)
5487 * 5510 *
5488 * Returns 0, this is not allowed to fail 5511 * Returns 0, this is not allowed to fail
5489 **/ 5512 **/
5490#ifdef I40E_FCOE
5491int i40e_close(struct net_device *netdev) 5513int i40e_close(struct net_device *netdev)
5492#else
5493static int i40e_close(struct net_device *netdev)
5494#endif
5495{ 5514{
5496 struct i40e_netdev_priv *np = netdev_priv(netdev); 5515 struct i40e_netdev_priv *np = netdev_priv(netdev);
5497 struct i40e_vsi *vsi = np->vsi; 5516 struct i40e_vsi *vsi = np->vsi;
@@ -5516,8 +5535,6 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
5516 5535
5517 WARN_ON(in_interrupt()); 5536 WARN_ON(in_interrupt());
5518 5537
5519 if (i40e_check_asq_alive(&pf->hw))
5520 i40e_vc_notify_reset(pf);
5521 5538
5522 /* do the biggest reset indicated */ 5539 /* do the biggest reset indicated */
5523 if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) { 5540 if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
@@ -6043,6 +6060,7 @@ static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
6043 case I40E_VSI_SRIOV: 6060 case I40E_VSI_SRIOV:
6044 case I40E_VSI_VMDQ2: 6061 case I40E_VSI_VMDQ2:
6045 case I40E_VSI_CTRL: 6062 case I40E_VSI_CTRL:
6063 case I40E_VSI_IWARP:
6046 case I40E_VSI_MIRROR: 6064 case I40E_VSI_MIRROR:
6047 default: 6065 default:
6048 /* there is no notification for other VSIs */ 6066 /* there is no notification for other VSIs */
@@ -6354,7 +6372,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
6354 break; 6372 break;
6355 default: 6373 default:
6356 dev_info(&pf->pdev->dev, 6374 dev_info(&pf->pdev->dev,
6357 "ARQ Error: Unknown event 0x%04x received\n", 6375 "ARQ: Unknown event 0x%04x ignored\n",
6358 opcode); 6376 opcode);
6359 break; 6377 break;
6360 } 6378 }
@@ -6719,6 +6737,8 @@ static void i40e_prep_for_reset(struct i40e_pf *pf)
6719 clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state); 6737 clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
6720 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) 6738 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
6721 return; 6739 return;
6740 if (i40e_check_asq_alive(&pf->hw))
6741 i40e_vc_notify_reset(pf);
6722 6742
6723 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n"); 6743 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
6724 6744
@@ -6839,6 +6859,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
6839 */ 6859 */
6840 ret = i40e_aq_set_phy_int_mask(&pf->hw, 6860 ret = i40e_aq_set_phy_int_mask(&pf->hw,
6841 ~(I40E_AQ_EVENT_LINK_UPDOWN | 6861 ~(I40E_AQ_EVENT_LINK_UPDOWN |
6862 I40E_AQ_EVENT_MEDIA_NA |
6842 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL); 6863 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
6843 if (ret) 6864 if (ret)
6844 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n", 6865 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
@@ -7148,6 +7169,7 @@ static void i40e_service_task(struct work_struct *work)
7148 i40e_vc_process_vflr_event(pf); 7169 i40e_vc_process_vflr_event(pf);
7149 i40e_watchdog_subtask(pf); 7170 i40e_watchdog_subtask(pf);
7150 i40e_fdir_reinit_subtask(pf); 7171 i40e_fdir_reinit_subtask(pf);
7172 i40e_client_subtask(pf);
7151 i40e_sync_filters_subtask(pf); 7173 i40e_sync_filters_subtask(pf);
7152 i40e_sync_udp_filters_subtask(pf); 7174 i40e_sync_udp_filters_subtask(pf);
7153 i40e_clean_adminq_subtask(pf); 7175 i40e_clean_adminq_subtask(pf);
@@ -7550,6 +7572,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
7550 int vectors_left; 7572 int vectors_left;
7551 int v_budget, i; 7573 int v_budget, i;
7552 int v_actual; 7574 int v_actual;
7575 int iwarp_requested = 0;
7553 7576
7554 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) 7577 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
7555 return -ENODEV; 7578 return -ENODEV;
@@ -7563,6 +7586,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
7563 * is governed by number of cpus in the system. 7586 * is governed by number of cpus in the system.
7564 * - assumes symmetric Tx/Rx pairing 7587 * - assumes symmetric Tx/Rx pairing
7565 * - The number of VMDq pairs 7588 * - The number of VMDq pairs
7589 * - The CPU count within the NUMA node if iWARP is enabled
7566#ifdef I40E_FCOE 7590#ifdef I40E_FCOE
7567 * - The number of FCOE qps. 7591 * - The number of FCOE qps.
7568#endif 7592#endif
@@ -7609,6 +7633,16 @@ static int i40e_init_msix(struct i40e_pf *pf)
7609 } 7633 }
7610 7634
7611#endif 7635#endif
7636 /* can we reserve enough for iWARP? */
7637 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
7638 if (!vectors_left)
7639 pf->num_iwarp_msix = 0;
7640 else if (vectors_left < pf->num_iwarp_msix)
7641 pf->num_iwarp_msix = 1;
7642 v_budget += pf->num_iwarp_msix;
7643 vectors_left -= pf->num_iwarp_msix;
7644 }
7645
7612 /* any vectors left over go for VMDq support */ 7646 /* any vectors left over go for VMDq support */
7613 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) { 7647 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
7614 int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps; 7648 int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
@@ -7643,6 +7677,8 @@ static int i40e_init_msix(struct i40e_pf *pf)
7643 * of these features based on the policy and at the end disable 7677 * of these features based on the policy and at the end disable
7644 * the features that did not get any vectors. 7678 * the features that did not get any vectors.
7645 */ 7679 */
7680 iwarp_requested = pf->num_iwarp_msix;
7681 pf->num_iwarp_msix = 0;
7646#ifdef I40E_FCOE 7682#ifdef I40E_FCOE
7647 pf->num_fcoe_qps = 0; 7683 pf->num_fcoe_qps = 0;
7648 pf->num_fcoe_msix = 0; 7684 pf->num_fcoe_msix = 0;
@@ -7681,17 +7717,33 @@ static int i40e_init_msix(struct i40e_pf *pf)
7681 pf->num_lan_msix = 1; 7717 pf->num_lan_msix = 1;
7682 break; 7718 break;
7683 case 3: 7719 case 3:
7720 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
7721 pf->num_lan_msix = 1;
7722 pf->num_iwarp_msix = 1;
7723 } else {
7724 pf->num_lan_msix = 2;
7725 }
7684#ifdef I40E_FCOE 7726#ifdef I40E_FCOE
7685 /* give one vector to FCoE */ 7727 /* give one vector to FCoE */
7686 if (pf->flags & I40E_FLAG_FCOE_ENABLED) { 7728 if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
7687 pf->num_lan_msix = 1; 7729 pf->num_lan_msix = 1;
7688 pf->num_fcoe_msix = 1; 7730 pf->num_fcoe_msix = 1;
7689 } 7731 }
7690#else
7691 pf->num_lan_msix = 2;
7692#endif 7732#endif
7693 break; 7733 break;
7694 default: 7734 default:
7735 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
7736 pf->num_iwarp_msix = min_t(int, (vec / 3),
7737 iwarp_requested);
7738 pf->num_vmdq_vsis = min_t(int, (vec / 3),
7739 I40E_DEFAULT_NUM_VMDQ_VSI);
7740 } else {
7741 pf->num_vmdq_vsis = min_t(int, (vec / 2),
7742 I40E_DEFAULT_NUM_VMDQ_VSI);
7743 }
7744 pf->num_lan_msix = min_t(int,
7745 (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
7746 pf->num_lan_msix);
7695#ifdef I40E_FCOE 7747#ifdef I40E_FCOE
7696 /* give one vector to FCoE */ 7748 /* give one vector to FCoE */
7697 if (pf->flags & I40E_FLAG_FCOE_ENABLED) { 7749 if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
@@ -7699,8 +7751,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
7699 vec--; 7751 vec--;
7700 } 7752 }
7701#endif 7753#endif
7702 /* give the rest to the PF */
7703 pf->num_lan_msix = min_t(int, vec, pf->num_lan_qps);
7704 break; 7754 break;
7705 } 7755 }
7706 } 7756 }
@@ -7710,6 +7760,12 @@ static int i40e_init_msix(struct i40e_pf *pf)
7710 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n"); 7760 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
7711 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED; 7761 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
7712 } 7762 }
7763
7764 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
7765 (pf->num_iwarp_msix == 0)) {
7766 dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
7767 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
7768 }
7713#ifdef I40E_FCOE 7769#ifdef I40E_FCOE
7714 7770
7715 if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) { 7771 if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) {
@@ -7801,6 +7857,7 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
7801 vectors = i40e_init_msix(pf); 7857 vectors = i40e_init_msix(pf);
7802 if (vectors < 0) { 7858 if (vectors < 0) {
7803 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | 7859 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
7860 I40E_FLAG_IWARP_ENABLED |
7804#ifdef I40E_FCOE 7861#ifdef I40E_FCOE
7805 I40E_FLAG_FCOE_ENABLED | 7862 I40E_FLAG_FCOE_ENABLED |
7806#endif 7863#endif
@@ -8381,7 +8438,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
8381 8438
8382 pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE, 8439 pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
8383 (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)); 8440 (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
8384 pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG;
8385 if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) { 8441 if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
8386 if (I40E_DEBUG_USER & debug) 8442 if (I40E_DEBUG_USER & debug)
8387 pf->hw.debug_mask = debug; 8443 pf->hw.debug_mask = debug;
@@ -8392,7 +8448,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
8392 /* Set default capability flags */ 8448 /* Set default capability flags */
8393 pf->flags = I40E_FLAG_RX_CSUM_ENABLED | 8449 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
8394 I40E_FLAG_MSI_ENABLED | 8450 I40E_FLAG_MSI_ENABLED |
8395 I40E_FLAG_LINK_POLLING_ENABLED |
8396 I40E_FLAG_MSIX_ENABLED; 8451 I40E_FLAG_MSIX_ENABLED;
8397 8452
8398 if (iommu_present(&pci_bus_type)) 8453 if (iommu_present(&pci_bus_type))
@@ -8474,6 +8529,12 @@ static int i40e_sw_init(struct i40e_pf *pf)
8474 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf); 8529 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
8475 } 8530 }
8476 8531
8532 if (pf->hw.func_caps.iwarp) {
8533 pf->flags |= I40E_FLAG_IWARP_ENABLED;
8534 /* IWARP needs one extra vector for CQP just like MISC.*/
8535 pf->num_iwarp_msix = (int)num_online_cpus() + 1;
8536 }
8537
8477#ifdef I40E_FCOE 8538#ifdef I40E_FCOE
8478 i40e_init_pf_fcoe(pf); 8539 i40e_init_pf_fcoe(pf);
8479 8540
@@ -8494,6 +8555,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
8494 I40E_FLAG_OUTER_UDP_CSUM_CAPABLE | 8555 I40E_FLAG_OUTER_UDP_CSUM_CAPABLE |
8495 I40E_FLAG_WB_ON_ITR_CAPABLE | 8556 I40E_FLAG_WB_ON_ITR_CAPABLE |
8496 I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE | 8557 I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE |
8558 I40E_FLAG_NO_PCI_LINK_CHECK |
8497 I40E_FLAG_100M_SGMII_CAPABLE | 8559 I40E_FLAG_100M_SGMII_CAPABLE |
8498 I40E_FLAG_USE_SET_LLDP_MIB | 8560 I40E_FLAG_USE_SET_LLDP_MIB |
8499 I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; 8561 I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
@@ -9008,6 +9070,7 @@ static const struct net_device_ops i40e_netdev_ops = {
9008 .ndo_get_vf_config = i40e_ndo_get_vf_config, 9070 .ndo_get_vf_config = i40e_ndo_get_vf_config,
9009 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state, 9071 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
9010 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk, 9072 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
9073 .ndo_set_vf_trust = i40e_ndo_set_vf_trust,
9011#if IS_ENABLED(CONFIG_VXLAN) 9074#if IS_ENABLED(CONFIG_VXLAN)
9012 .ndo_add_vxlan_port = i40e_add_vxlan_port, 9075 .ndo_add_vxlan_port = i40e_add_vxlan_port,
9013 .ndo_del_vxlan_port = i40e_del_vxlan_port, 9076 .ndo_del_vxlan_port = i40e_del_vxlan_port,
@@ -9097,6 +9160,12 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
9097 I40E_VLAN_ANY, false, true); 9160 I40E_VLAN_ANY, false, true);
9098 spin_unlock_bh(&vsi->mac_filter_list_lock); 9161 spin_unlock_bh(&vsi->mac_filter_list_lock);
9099 } 9162 }
9163 } else if ((pf->hw.aq.api_maj_ver > 1) ||
9164 ((pf->hw.aq.api_maj_ver == 1) &&
9165 (pf->hw.aq.api_min_ver > 4))) {
9166 /* Supported in FW API version higher than 1.4 */
9167 pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
9168 pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
9100 } else { 9169 } else {
9101 /* relate the VSI_VMDQ name to the VSI_MAIN name */ 9170 /* relate the VSI_VMDQ name to the VSI_MAIN name */
9102 snprintf(netdev->name, IFNAMSIZ, "%sv%%d", 9171 snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
@@ -9328,6 +9397,13 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
9328 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 9397 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9329 } 9398 }
9330 9399
9400 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
9401 ctxt.info.valid_sections |=
9402 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
9403 ctxt.info.queueing_opt_flags |=
9404 I40E_AQ_VSI_QUE_OPT_TCP_ENA;
9405 }
9406
9331 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 9407 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
9332 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL; 9408 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
9333 if (pf->vf[vsi->vf_id].spoofchk) { 9409 if (pf->vf[vsi->vf_id].spoofchk) {
@@ -9351,6 +9427,10 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
9351 break; 9427 break;
9352 9428
9353#endif /* I40E_FCOE */ 9429#endif /* I40E_FCOE */
9430 case I40E_VSI_IWARP:
9431 /* send down message to iWARP */
9432 break;
9433
9354 default: 9434 default:
9355 return -ENODEV; 9435 return -ENODEV;
9356 } 9436 }
@@ -10467,6 +10547,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
10467 10547
10468 /* make sure all the fancies are disabled */ 10548 /* make sure all the fancies are disabled */
10469 pf->flags &= ~(I40E_FLAG_RSS_ENABLED | 10549 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
10550 I40E_FLAG_IWARP_ENABLED |
10470#ifdef I40E_FCOE 10551#ifdef I40E_FCOE
10471 I40E_FLAG_FCOE_ENABLED | 10552 I40E_FLAG_FCOE_ENABLED |
10472#endif 10553#endif
@@ -10484,6 +10565,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
10484 queues_left -= pf->num_lan_qps; 10565 queues_left -= pf->num_lan_qps;
10485 10566
10486 pf->flags &= ~(I40E_FLAG_RSS_ENABLED | 10567 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
10568 I40E_FLAG_IWARP_ENABLED |
10487#ifdef I40E_FCOE 10569#ifdef I40E_FCOE
10488 I40E_FLAG_FCOE_ENABLED | 10570 I40E_FLAG_FCOE_ENABLED |
10489#endif 10571#endif
@@ -10748,6 +10830,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
10748 hw->bus.func = PCI_FUNC(pdev->devfn); 10830 hw->bus.func = PCI_FUNC(pdev->devfn);
10749 pf->instance = pfs_found; 10831 pf->instance = pfs_found;
10750 10832
10833 /* set up the locks for the AQ, do this only once in probe
10834 * and destroy them only once in remove
10835 */
10836 mutex_init(&hw->aq.asq_mutex);
10837 mutex_init(&hw->aq.arq_mutex);
10838
10751 if (debug != -1) { 10839 if (debug != -1) {
10752 pf->msg_enable = pf->hw.debug_mask; 10840 pf->msg_enable = pf->hw.debug_mask;
10753 pf->msg_enable = debug; 10841 pf->msg_enable = debug;
@@ -10793,12 +10881,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
10793 /* set up a default setting for link flow control */ 10881 /* set up a default setting for link flow control */
10794 pf->hw.fc.requested_mode = I40E_FC_NONE; 10882 pf->hw.fc.requested_mode = I40E_FC_NONE;
10795 10883
10796 /* set up the locks for the AQ, do this only once in probe
10797 * and destroy them only once in remove
10798 */
10799 mutex_init(&hw->aq.asq_mutex);
10800 mutex_init(&hw->aq.arq_mutex);
10801
10802 err = i40e_init_adminq(hw); 10884 err = i40e_init_adminq(hw);
10803 if (err) { 10885 if (err) {
10804 if (err == I40E_ERR_FIRMWARE_API_VERSION) 10886 if (err == I40E_ERR_FIRMWARE_API_VERSION)
@@ -10990,6 +11072,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
10990 */ 11072 */
10991 err = i40e_aq_set_phy_int_mask(&pf->hw, 11073 err = i40e_aq_set_phy_int_mask(&pf->hw,
10992 ~(I40E_AQ_EVENT_LINK_UPDOWN | 11074 ~(I40E_AQ_EVENT_LINK_UPDOWN |
11075 I40E_AQ_EVENT_MEDIA_NA |
10993 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL); 11076 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
10994 if (err) 11077 if (err)
10995 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n", 11078 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
@@ -11059,7 +11142,17 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
11059 } 11142 }
11060#endif /* CONFIG_PCI_IOV */ 11143#endif /* CONFIG_PCI_IOV */
11061 11144
11062 pfs_found++; 11145 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11146 pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
11147 pf->num_iwarp_msix,
11148 I40E_IWARP_IRQ_PILE_ID);
11149 if (pf->iwarp_base_vector < 0) {
11150 dev_info(&pdev->dev,
11151 "failed to get tracking for %d vectors for IWARP err=%d\n",
11152 pf->num_iwarp_msix, pf->iwarp_base_vector);
11153 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
11154 }
11155 }
11063 11156
11064 i40e_dbg_pf_init(pf); 11157 i40e_dbg_pf_init(pf);
11065 11158
@@ -11070,6 +11163,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
11070 mod_timer(&pf->service_timer, 11163 mod_timer(&pf->service_timer,
11071 round_jiffies(jiffies + pf->service_timer_period)); 11164 round_jiffies(jiffies + pf->service_timer_period));
11072 11165
11166 /* add this PF to client device list and launch a client service task */
11167 err = i40e_lan_add_device(pf);
11168 if (err)
11169 dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
11170 err);
11171
11073#ifdef I40E_FCOE 11172#ifdef I40E_FCOE
11074 /* create FCoE interface */ 11173 /* create FCoE interface */
11075 i40e_fcoe_vsi_setup(pf); 11174 i40e_fcoe_vsi_setup(pf);
@@ -11175,7 +11274,6 @@ err_init_lan_hmc:
11175 kfree(pf->qp_pile); 11274 kfree(pf->qp_pile);
11176err_sw_init: 11275err_sw_init:
11177err_adminq_setup: 11276err_adminq_setup:
11178 (void)i40e_shutdown_adminq(hw);
11179err_pf_reset: 11277err_pf_reset:
11180 iounmap(hw->hw_addr); 11278 iounmap(hw->hw_addr);
11181err_ioremap: 11279err_ioremap:
@@ -11217,8 +11315,10 @@ static void i40e_remove(struct pci_dev *pdev)
11217 /* no more scheduling of any task */ 11315 /* no more scheduling of any task */
11218 set_bit(__I40E_SUSPENDED, &pf->state); 11316 set_bit(__I40E_SUSPENDED, &pf->state);
11219 set_bit(__I40E_DOWN, &pf->state); 11317 set_bit(__I40E_DOWN, &pf->state);
11220 del_timer_sync(&pf->service_timer); 11318 if (pf->service_timer.data)
11221 cancel_work_sync(&pf->service_task); 11319 del_timer_sync(&pf->service_timer);
11320 if (pf->service_task.func)
11321 cancel_work_sync(&pf->service_task);
11222 11322
11223 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { 11323 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
11224 i40e_free_vfs(pf); 11324 i40e_free_vfs(pf);
@@ -11245,6 +11345,13 @@ static void i40e_remove(struct pci_dev *pdev)
11245 if (pf->vsi[pf->lan_vsi]) 11345 if (pf->vsi[pf->lan_vsi])
11246 i40e_vsi_release(pf->vsi[pf->lan_vsi]); 11346 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
11247 11347
11348 /* remove attached clients */
11349 ret_code = i40e_lan_del_device(pf);
11350 if (ret_code) {
11351 dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
11352 ret_code);
11353 }
11354
11248 /* shutdown and destroy the HMC */ 11355 /* shutdown and destroy the HMC */
11249 if (hw->hmc.hmc_obj) { 11356 if (hw->hmc.hmc_obj) {
11250 ret_code = i40e_shutdown_lan_hmc(hw); 11357 ret_code = i40e_shutdown_lan_hmc(hw);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 5730f8091e1b..f2cea3d25de3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -696,7 +696,7 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
696 i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n", 696 i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
697 i40e_nvm_update_state_str[upd_cmd], 697 i40e_nvm_update_state_str[upd_cmd],
698 hw->nvmupd_state, 698 hw->nvmupd_state,
699 hw->aq.nvm_release_on_done, 699 hw->nvm_release_on_done,
700 cmd->command, cmd->config, cmd->offset, cmd->data_size); 700 cmd->command, cmd->config, cmd->offset, cmd->data_size);
701 701
702 if (upd_cmd == I40E_NVMUPD_INVALID) { 702 if (upd_cmd == I40E_NVMUPD_INVALID) {
@@ -799,7 +799,7 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
799 if (status) { 799 if (status) {
800 i40e_release_nvm(hw); 800 i40e_release_nvm(hw);
801 } else { 801 } else {
802 hw->aq.nvm_release_on_done = true; 802 hw->nvm_release_on_done = true;
803 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; 803 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
804 } 804 }
805 } 805 }
@@ -815,7 +815,7 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
815 if (status) { 815 if (status) {
816 i40e_release_nvm(hw); 816 i40e_release_nvm(hw);
817 } else { 817 } else {
818 hw->aq.nvm_release_on_done = true; 818 hw->nvm_release_on_done = true;
819 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; 819 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
820 } 820 }
821 } 821 }
@@ -849,7 +849,7 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
849 -EIO; 849 -EIO;
850 i40e_release_nvm(hw); 850 i40e_release_nvm(hw);
851 } else { 851 } else {
852 hw->aq.nvm_release_on_done = true; 852 hw->nvm_release_on_done = true;
853 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; 853 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
854 } 854 }
855 } 855 }
@@ -953,7 +953,7 @@ retry:
953 -EIO; 953 -EIO;
954 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; 954 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
955 } else { 955 } else {
956 hw->aq.nvm_release_on_done = true; 956 hw->nvm_release_on_done = true;
957 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; 957 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
958 } 958 }
959 break; 959 break;
@@ -980,7 +980,7 @@ retry:
980 -EIO; 980 -EIO;
981 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; 981 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
982 } else { 982 } else {
983 hw->aq.nvm_release_on_done = true; 983 hw->nvm_release_on_done = true;
984 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; 984 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
985 } 985 }
986 break; 986 break;
@@ -1030,6 +1030,37 @@ retry:
1030} 1030}
1031 1031
1032/** 1032/**
1033 * i40e_nvmupd_check_wait_event - handle NVM update operation events
1034 * @hw: pointer to the hardware structure
1035 * @opcode: the event that just happened
1036 **/
1037void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
1038{
1039 if (opcode == i40e_aqc_opc_nvm_erase ||
1040 opcode == i40e_aqc_opc_nvm_update) {
1041 i40e_debug(hw, I40E_DEBUG_NVM,
1042 "NVMUPD: clearing wait on opcode 0x%04x\n", opcode);
1043 if (hw->nvm_release_on_done) {
1044 i40e_release_nvm(hw);
1045 hw->nvm_release_on_done = false;
1046 }
1047
1048 switch (hw->nvmupd_state) {
1049 case I40E_NVMUPD_STATE_INIT_WAIT:
1050 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1051 break;
1052
1053 case I40E_NVMUPD_STATE_WRITE_WAIT:
1054 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
1055 break;
1056
1057 default:
1058 break;
1059 }
1060 }
1061}
1062
1063/**
1033 * i40e_nvmupd_validate_command - Validate given command 1064 * i40e_nvmupd_validate_command - Validate given command
1034 * @hw: pointer to hardware structure 1065 * @hw: pointer to hardware structure
1035 * @cmd: pointer to nvm update command buffer 1066 * @cmd: pointer to nvm update command buffer
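The new i40e_nvmupd_check_wait_event() helper is driven by admin receive queue completions: when the firmware reports that an NVM erase or update finished, the helper drops the NVM semaphore if nvm_release_on_done was set and moves the update state machine out of its *_WAIT state. A hedged sketch of how a caller might dispatch those events; the real wiring sits in the driver's admin-queue service path, which is not part of this hunk:

static void demo_handle_arq_event(struct i40e_hw *hw, u16 opcode)
{
	switch (opcode) {
	case i40e_aqc_opc_nvm_erase:
	case i40e_aqc_opc_nvm_update:
		/* completion of a deferred NVM operation */
		i40e_nvmupd_check_wait_event(hw, opcode);
		break;
	default:
		break;
	}
}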
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index d51eee5bf79a..134035f53f2c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -308,6 +308,7 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
308i40e_status i40e_nvmupd_command(struct i40e_hw *hw, 308i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
309 struct i40e_nvm_access *cmd, 309 struct i40e_nvm_access *cmd,
310 u8 *bytes, int *); 310 u8 *bytes, int *);
311void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode);
311void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status); 312void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
312 313
313extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[]; 314extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 084d0ab316b7..29ffed27e5a9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -636,19 +636,21 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
636 636
637/** 637/**
638 * i40e_clean_tx_irq - Reclaim resources after transmit completes 638 * i40e_clean_tx_irq - Reclaim resources after transmit completes
639 * @tx_ring: tx ring to clean 639 * @vsi: the VSI we care about
640 * @budget: how many cleans we're allowed 640 * @tx_ring: Tx ring to clean
641 * @napi_budget: Used to determine if we are in netpoll
641 * 642 *
642 * Returns true if there's any budget left (e.g. the clean is finished) 643 * Returns true if there's any budget left (e.g. the clean is finished)
643 **/ 644 **/
644static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) 645static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
646 struct i40e_ring *tx_ring, int napi_budget)
645{ 647{
646 u16 i = tx_ring->next_to_clean; 648 u16 i = tx_ring->next_to_clean;
647 struct i40e_tx_buffer *tx_buf; 649 struct i40e_tx_buffer *tx_buf;
648 struct i40e_tx_desc *tx_head; 650 struct i40e_tx_desc *tx_head;
649 struct i40e_tx_desc *tx_desc; 651 struct i40e_tx_desc *tx_desc;
650 unsigned int total_packets = 0; 652 unsigned int total_bytes = 0, total_packets = 0;
651 unsigned int total_bytes = 0; 653 unsigned int budget = vsi->work_limit;
652 654
653 tx_buf = &tx_ring->tx_bi[i]; 655 tx_buf = &tx_ring->tx_bi[i];
654 tx_desc = I40E_TX_DESC(tx_ring, i); 656 tx_desc = I40E_TX_DESC(tx_ring, i);
@@ -678,7 +680,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
678 total_packets += tx_buf->gso_segs; 680 total_packets += tx_buf->gso_segs;
679 681
680 /* free the skb */ 682 /* free the skb */
681 dev_consume_skb_any(tx_buf->skb); 683 napi_consume_skb(tx_buf->skb, napi_budget);
682 684
683 /* unmap skb header data */ 685 /* unmap skb header data */
684 dma_unmap_single(tx_ring->dev, 686 dma_unmap_single(tx_ring->dev,
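Replacing dev_consume_skb_any() with napi_consume_skb() lets completed Tx buffers go through the per-CPU bulk-free path when the clean runs from a real NAPI poll; a budget of zero (the netpoll case) makes napi_consume_skb() fall back to dev_consume_skb_any() internally. A minimal sketch of the call pattern, with a hypothetical completed-skb array standing in for the descriptor walk:

#include <linux/skbuff.h>

static void demo_free_completed(struct sk_buff **skbs, int n, int napi_budget)
{
	int i;

	/* napi_budget > 0: skbs are batched on the per-CPU free list;
	 * napi_budget == 0: each skb is freed immediately and safely
	 */
	for (i = 0; i < n; i++)
		napi_consume_skb(skbs[i], napi_budget);
}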
@@ -749,7 +751,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
749 751
750 if (budget && 752 if (budget &&
751 ((j / (WB_STRIDE + 1)) == 0) && (j != 0) && 753 ((j / (WB_STRIDE + 1)) == 0) && (j != 0) &&
752 !test_bit(__I40E_DOWN, &tx_ring->vsi->state) && 754 !test_bit(__I40E_DOWN, &vsi->state) &&
753 (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) 755 (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
754 tx_ring->arm_wb = true; 756 tx_ring->arm_wb = true;
755 } 757 }
@@ -767,7 +769,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
767 smp_mb(); 769 smp_mb();
768 if (__netif_subqueue_stopped(tx_ring->netdev, 770 if (__netif_subqueue_stopped(tx_ring->netdev,
769 tx_ring->queue_index) && 771 tx_ring->queue_index) &&
770 !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) { 772 !test_bit(__I40E_DOWN, &vsi->state)) {
771 netif_wake_subqueue(tx_ring->netdev, 773 netif_wake_subqueue(tx_ring->netdev,
772 tx_ring->queue_index); 774 tx_ring->queue_index);
773 ++tx_ring->tx_stats.restart_queue; 775 ++tx_ring->tx_stats.restart_queue;
@@ -1701,7 +1703,9 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget)
1701 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) 1703 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
1702 : 0; 1704 : 0;
1703#ifdef I40E_FCOE 1705#ifdef I40E_FCOE
1704 if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { 1706 if (unlikely(
1707 i40e_rx_is_fcoe(rx_ptype) &&
1708 !i40e_fcoe_handle_offload(rx_ring, rx_desc, skb))) {
1705 dev_kfree_skb_any(skb); 1709 dev_kfree_skb_any(skb);
1706 continue; 1710 continue;
1707 } 1711 }
@@ -1832,7 +1836,9 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
1832 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) 1836 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
1833 : 0; 1837 : 0;
1834#ifdef I40E_FCOE 1838#ifdef I40E_FCOE
1835 if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { 1839 if (unlikely(
1840 i40e_rx_is_fcoe(rx_ptype) &&
1841 !i40e_fcoe_handle_offload(rx_ring, rx_desc, skb))) {
1836 dev_kfree_skb_any(skb); 1842 dev_kfree_skb_any(skb);
1837 continue; 1843 continue;
1838 } 1844 }
@@ -1975,9 +1981,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
1975 * budget and be more aggressive about cleaning up the Tx descriptors. 1981 * budget and be more aggressive about cleaning up the Tx descriptors.
1976 */ 1982 */
1977 i40e_for_each_ring(ring, q_vector->tx) { 1983 i40e_for_each_ring(ring, q_vector->tx) {
1978 clean_complete = clean_complete && 1984 if (!i40e_clean_tx_irq(vsi, ring, budget)) {
1979 i40e_clean_tx_irq(ring, vsi->work_limit); 1985 clean_complete = false;
1980 arm_wb = arm_wb || ring->arm_wb; 1986 continue;
1987 }
1988 arm_wb |= ring->arm_wb;
1981 ring->arm_wb = false; 1989 ring->arm_wb = false;
1982 } 1990 }
1983 1991
@@ -1999,8 +2007,9 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
1999 cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring); 2007 cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
2000 2008
2001 work_done += cleaned; 2009 work_done += cleaned;
2002 /* if we didn't clean as many as budgeted, we must be done */ 2010 /* if we clean as many as budgeted, we must not be done */
2003 clean_complete = clean_complete && (budget_per_ring > cleaned); 2011 if (cleaned >= budget_per_ring)
2012 clean_complete = false;
2004 } 2013 }
2005 2014
2006 /* If work not completed, return budget and polling will return */ 2015 /* If work not completed, return budget and polling will return */
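The restructured loops keep the same NAPI contract as before: any Tx ring that could not finish, or any Rx ring that consumed its whole per-ring slice of the budget, forces another poll pass instead of completion. A compressed sketch of that contract, with the ring cleaning folded into a single boolean for brevity:

#include <linux/netdevice.h>

static int demo_napi_poll(struct napi_struct *napi, int budget,
			  bool clean_complete)
{
	if (!clean_complete)
		return budget;		/* NAPI core keeps polling */

	/* all rings drained: report completion; the driver would then
	 * re-enable the vector's interrupt
	 */
	napi_complete(napi);
	return 0;
}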
@@ -2247,15 +2256,13 @@ out:
2247 2256
2248/** 2257/**
2249 * i40e_tso - set up the tso context descriptor 2258 * i40e_tso - set up the tso context descriptor
2250 * @tx_ring: ptr to the ring to send
2251 * @skb: ptr to the skb we're sending 2259 * @skb: ptr to the skb we're sending
2252 * @hdr_len: ptr to the size of the packet header 2260 * @hdr_len: ptr to the size of the packet header
2253 * @cd_type_cmd_tso_mss: Quad Word 1 2261 * @cd_type_cmd_tso_mss: Quad Word 1
2254 * 2262 *
2255 * Returns 0 if no TSO can happen, 1 if tso is going, or error 2263 * Returns 0 if no TSO can happen, 1 if tso is going, or error
2256 **/ 2264 **/
2257static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, 2265static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
2258 u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
2259{ 2266{
2260 u64 cd_cmd, cd_tso_len, cd_mss; 2267 u64 cd_cmd, cd_tso_len, cd_mss;
2261 union { 2268 union {
@@ -2299,9 +2306,8 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
2299 l4_offset = l4.hdr - skb->data; 2306 l4_offset = l4.hdr - skb->data;
2300 2307
2301 /* remove payload length from outer checksum */ 2308 /* remove payload length from outer checksum */
2302 paylen = (__force u16)l4.udp->check; 2309 paylen = skb->len - l4_offset;
2303 paylen += ntohs(1) * (u16)~(skb->len - l4_offset); 2310 csum_replace_by_diff(&l4.udp->check, htonl(paylen));
2304 l4.udp->check = ~csum_fold((__force __wsum)paylen);
2305 } 2311 }
2306 2312
2307 /* reset pointers to inner headers */ 2313 /* reset pointers to inner headers */
@@ -2321,9 +2327,8 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
2321 l4_offset = l4.hdr - skb->data; 2327 l4_offset = l4.hdr - skb->data;
2322 2328
2323 /* remove payload length from inner checksum */ 2329 /* remove payload length from inner checksum */
2324 paylen = (__force u16)l4.tcp->check; 2330 paylen = skb->len - l4_offset;
2325 paylen += ntohs(1) * (u16)~(skb->len - l4_offset); 2331 csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
2326 l4.tcp->check = ~csum_fold((__force __wsum)paylen);
2327 2332
2328 /* compute length of segmentation header */ 2333 /* compute length of segmentation header */
2329 *hdr_len = (l4.tcp->doff * 4) + l4_offset; 2334 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
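Both hunks replace the open-coded one's-complement arithmetic with csum_replace_by_diff(), which applies the payload-length adjustment to the checksum already stored in the header so the TSO engine only has to add per-segment lengths back in. A hedged sketch of the helper in isolation, for the inner TCP case:

#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <net/checksum.h>

/* l4_offset is assumed to be the byte offset of the TCP header in skb->data */
static void demo_strip_paylen(struct sk_buff *skb, struct tcphdr *th,
			      unsigned int l4_offset)
{
	u32 paylen = skb->len - l4_offset;

	/* remove the payload length from the pseudo-header checksum */
	csum_replace_by_diff(&th->check, (__force __wsum)htonl(paylen));
}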
@@ -2405,7 +2410,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
2405 unsigned char *hdr; 2410 unsigned char *hdr;
2406 } l4; 2411 } l4;
2407 unsigned char *exthdr; 2412 unsigned char *exthdr;
2408 u32 offset, cmd = 0, tunnel = 0; 2413 u32 offset, cmd = 0;
2409 __be16 frag_off; 2414 __be16 frag_off;
2410 u8 l4_proto = 0; 2415 u8 l4_proto = 0;
2411 2416
@@ -2419,6 +2424,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
2419 offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; 2424 offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
2420 2425
2421 if (skb->encapsulation) { 2426 if (skb->encapsulation) {
2427 u32 tunnel = 0;
2422 /* define outer network header type */ 2428 /* define outer network header type */
2423 if (*tx_flags & I40E_TX_FLAGS_IPV4) { 2429 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
2424 tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ? 2430 tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
@@ -2717,6 +2723,8 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
2717 tx_bi = first; 2723 tx_bi = first;
2718 2724
2719 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { 2725 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
2726 unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
2727
2720 if (dma_mapping_error(tx_ring->dev, dma)) 2728 if (dma_mapping_error(tx_ring->dev, dma))
2721 goto dma_error; 2729 goto dma_error;
2722 2730
@@ -2724,12 +2732,14 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
2724 dma_unmap_len_set(tx_bi, len, size); 2732 dma_unmap_len_set(tx_bi, len, size);
2725 dma_unmap_addr_set(tx_bi, dma, dma); 2733 dma_unmap_addr_set(tx_bi, dma, dma);
2726 2734
2735 /* align size to end of page */
2736 max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
2727 tx_desc->buffer_addr = cpu_to_le64(dma); 2737 tx_desc->buffer_addr = cpu_to_le64(dma);
2728 2738
2729 while (unlikely(size > I40E_MAX_DATA_PER_TXD)) { 2739 while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
2730 tx_desc->cmd_type_offset_bsz = 2740 tx_desc->cmd_type_offset_bsz =
2731 build_ctob(td_cmd, td_offset, 2741 build_ctob(td_cmd, td_offset,
2732 I40E_MAX_DATA_PER_TXD, td_tag); 2742 max_data, td_tag);
2733 2743
2734 tx_desc++; 2744 tx_desc++;
2735 i++; 2745 i++;
@@ -2740,9 +2750,10 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
2740 i = 0; 2750 i = 0;
2741 } 2751 }
2742 2752
2743 dma += I40E_MAX_DATA_PER_TXD; 2753 dma += max_data;
2744 size -= I40E_MAX_DATA_PER_TXD; 2754 size -= max_data;
2745 2755
2756 max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
2746 tx_desc->buffer_addr = cpu_to_le64(dma); 2757 tx_desc->buffer_addr = cpu_to_le64(dma);
2747 } 2758 }
2748 2759
@@ -2892,7 +2903,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
2892 if (i40e_chk_linearize(skb, count)) { 2903 if (i40e_chk_linearize(skb, count)) {
2893 if (__skb_linearize(skb)) 2904 if (__skb_linearize(skb))
2894 goto out_drop; 2905 goto out_drop;
2895 count = TXD_USE_COUNT(skb->len); 2906 count = i40e_txd_use_count(skb->len);
2896 tx_ring->tx_stats.tx_linearize++; 2907 tx_ring->tx_stats.tx_linearize++;
2897 } 2908 }
2898 2909
@@ -2923,7 +2934,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
2923 else if (protocol == htons(ETH_P_IPV6)) 2934 else if (protocol == htons(ETH_P_IPV6))
2924 tx_flags |= I40E_TX_FLAGS_IPV6; 2935 tx_flags |= I40E_TX_FLAGS_IPV6;
2925 2936
2926 tso = i40e_tso(tx_ring, skb, &hdr_len, &cd_type_cmd_tso_mss); 2937 tso = i40e_tso(skb, &hdr_len, &cd_type_cmd_tso_mss);
2927 2938
2928 if (tso < 0) 2939 if (tso < 0)
2929 goto out_drop; 2940 goto out_drop;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index cdd5dc00aec5..77ccdde56c0c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -146,10 +146,39 @@ enum i40e_dyn_idx_t {
146 146
147#define I40E_MAX_BUFFER_TXD 8 147#define I40E_MAX_BUFFER_TXD 8
148#define I40E_MIN_TX_LEN 17 148#define I40E_MIN_TX_LEN 17
149#define I40E_MAX_DATA_PER_TXD 8192 149
150/* The size limit for a transmit buffer in a descriptor is (16K - 1).
151 * In order to align with the read requests we will align the value to
152 * the nearest 4K which represents our maximum read request size.
153 */
154#define I40E_MAX_READ_REQ_SIZE 4096
155#define I40E_MAX_DATA_PER_TXD (16 * 1024 - 1)
156#define I40E_MAX_DATA_PER_TXD_ALIGNED \
157 (I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))
158
 159/* This ugly bit of math is equivalent to DIV_ROUND_UP(size, X) where X is
160 * the value I40E_MAX_DATA_PER_TXD_ALIGNED. It is needed due to the fact
161 * that 12K is not a power of 2 and division is expensive. It is used to
162 * approximate the number of descriptors used per linear buffer. Note
163 * that this will overestimate in some cases as it doesn't account for the
164 * fact that we will add up to 4K - 1 in aligning the 12K buffer, however
165 * the error should not impact things much as large buffers usually mean
 166 * we will use fewer descriptors than there are frags in an skb.
167 */
168static inline unsigned int i40e_txd_use_count(unsigned int size)
169{
170 const unsigned int max = I40E_MAX_DATA_PER_TXD_ALIGNED;
171 const unsigned int reciprocal = ((1ull << 32) - 1 + (max / 2)) / max;
172 unsigned int adjust = ~(u32)0;
173
174 /* if we rounded up on the reciprocal pull down the adjustment */
175 if ((max * reciprocal) > adjust)
176 adjust = ~(u32)(reciprocal - 1);
177
178 return (u32)((((u64)size * reciprocal) + adjust) >> 32);
179}
150 180
151/* Tx Descriptors needed, worst case */ 181/* Tx Descriptors needed, worst case */
152#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
153#define DESC_NEEDED (MAX_SKB_FRAGS + 4) 182#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
154#define I40E_MIN_DESC_PENDING 4 183#define I40E_MIN_DESC_PENDING 4
155 184
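The reciprocal multiply in i40e_txd_use_count() is meant to reproduce DIV_ROUND_UP(size, 12288) without a division. A quick userspace check (not driver code) that the two agree for fragment sizes up to 64 KiB, well inside the range where the reciprocal rounding stays exact:

#include <assert.h>
#include <stdint.h>

#define ALIGNED			12288u
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned int txd_use_count(unsigned int size)
{
	const unsigned int max = ALIGNED;
	const unsigned int reciprocal = ((1ull << 32) - 1 + (max / 2)) / max;
	unsigned int adjust = ~(uint32_t)0;

	if ((max * reciprocal) > adjust)
		adjust = ~(uint32_t)(reciprocal - 1);

	return (uint32_t)((((uint64_t)size * reciprocal) + adjust) >> 32);
}

int main(void)
{
	unsigned int size;

	for (size = 1; size <= 65536; size++)
		assert(txd_use_count(size) == DIV_ROUND_UP(size, ALIGNED));
	return 0;
}

The overestimate the comment warns about comes from the alignment stretch in i40e_tx_map(), not from the division itself.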
@@ -377,7 +406,7 @@ static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
377 int count = 0, size = skb_headlen(skb); 406 int count = 0, size = skb_headlen(skb);
378 407
379 for (;;) { 408 for (;;) {
380 count += TXD_USE_COUNT(size); 409 count += i40e_txd_use_count(size);
381 410
382 if (!nr_frags--) 411 if (!nr_frags--)
383 break; 412 break;
@@ -419,4 +448,14 @@ static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
419 448
420 return __i40e_chk_linearize(skb); 449 return __i40e_chk_linearize(skb);
421} 450}
451
452/**
453 * i40e_rx_is_fcoe - returns true if the Rx packet type is FCoE
454 * @ptype: the packet type field from Rx descriptor write-back
455 **/
456static inline bool i40e_rx_is_fcoe(u16 ptype)
457{
458 return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
459 (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
460}
422#endif /* _I40E_TXRX_H_ */ 461#endif /* _I40E_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 0a0baf71041b..793036b259e5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -78,7 +78,7 @@ enum i40e_debug_mask {
78 I40E_DEBUG_DCB = 0x00000400, 78 I40E_DEBUG_DCB = 0x00000400,
79 I40E_DEBUG_DIAG = 0x00000800, 79 I40E_DEBUG_DIAG = 0x00000800,
80 I40E_DEBUG_FD = 0x00001000, 80 I40E_DEBUG_FD = 0x00001000,
81 81 I40E_DEBUG_IWARP = 0x00F00000,
82 I40E_DEBUG_AQ_MESSAGE = 0x01000000, 82 I40E_DEBUG_AQ_MESSAGE = 0x01000000,
83 I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000, 83 I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
84 I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000, 84 I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000,
@@ -160,6 +160,7 @@ enum i40e_vsi_type {
160 I40E_VSI_MIRROR = 5, 160 I40E_VSI_MIRROR = 5,
161 I40E_VSI_SRIOV = 6, 161 I40E_VSI_SRIOV = 6,
162 I40E_VSI_FDIR = 7, 162 I40E_VSI_FDIR = 7,
163 I40E_VSI_IWARP = 8,
163 I40E_VSI_TYPE_UNKNOWN 164 I40E_VSI_TYPE_UNKNOWN
164}; 165};
165 166
@@ -548,6 +549,7 @@ struct i40e_hw {
548 enum i40e_nvmupd_state nvmupd_state; 549 enum i40e_nvmupd_state nvmupd_state;
549 struct i40e_aq_desc nvm_wb_desc; 550 struct i40e_aq_desc nvm_wb_desc;
550 struct i40e_virt_mem nvm_buff; 551 struct i40e_virt_mem nvm_buff;
552 bool nvm_release_on_done;
551 553
552 /* HMC info */ 554 /* HMC info */
553 struct i40e_hmc_info hmc; /* HMC info struct */ 555 struct i40e_hmc_info hmc; /* HMC info struct */
@@ -1532,4 +1534,37 @@ struct i40e_lldp_variables {
1532 1534
1533/* RSS Hash Table Size */ 1535/* RSS Hash Table Size */
1534#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000 1536#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000
1537
1538/* INPUT SET MASK for RSS, flow director, and flexible payload */
1539#define I40E_L3_SRC_SHIFT 47
1540#define I40E_L3_SRC_MASK (0x3ULL << I40E_L3_SRC_SHIFT)
1541#define I40E_L3_V6_SRC_SHIFT 43
1542#define I40E_L3_V6_SRC_MASK (0xFFULL << I40E_L3_V6_SRC_SHIFT)
1543#define I40E_L3_DST_SHIFT 35
1544#define I40E_L3_DST_MASK (0x3ULL << I40E_L3_DST_SHIFT)
1545#define I40E_L3_V6_DST_SHIFT 35
1546#define I40E_L3_V6_DST_MASK (0xFFULL << I40E_L3_V6_DST_SHIFT)
1547#define I40E_L4_SRC_SHIFT 34
1548#define I40E_L4_SRC_MASK (0x1ULL << I40E_L4_SRC_SHIFT)
1549#define I40E_L4_DST_SHIFT 33
1550#define I40E_L4_DST_MASK (0x1ULL << I40E_L4_DST_SHIFT)
1551#define I40E_VERIFY_TAG_SHIFT 31
1552#define I40E_VERIFY_TAG_MASK (0x3ULL << I40E_VERIFY_TAG_SHIFT)
1553
1554#define I40E_FLEX_50_SHIFT 13
1555#define I40E_FLEX_50_MASK (0x1ULL << I40E_FLEX_50_SHIFT)
1556#define I40E_FLEX_51_SHIFT 12
1557#define I40E_FLEX_51_MASK (0x1ULL << I40E_FLEX_51_SHIFT)
1558#define I40E_FLEX_52_SHIFT 11
1559#define I40E_FLEX_52_MASK (0x1ULL << I40E_FLEX_52_SHIFT)
1560#define I40E_FLEX_53_SHIFT 10
1561#define I40E_FLEX_53_MASK (0x1ULL << I40E_FLEX_53_SHIFT)
1562#define I40E_FLEX_54_SHIFT 9
1563#define I40E_FLEX_54_MASK (0x1ULL << I40E_FLEX_54_SHIFT)
1564#define I40E_FLEX_55_SHIFT 8
1565#define I40E_FLEX_55_MASK (0x1ULL << I40E_FLEX_55_SHIFT)
1566#define I40E_FLEX_56_SHIFT 7
1567#define I40E_FLEX_56_MASK (0x1ULL << I40E_FLEX_56_SHIFT)
1568#define I40E_FLEX_57_SHIFT 6
1569#define I40E_FLEX_57_MASK (0x1ULL << I40E_FLEX_57_SHIFT)
1535#endif /* _I40E_TYPE_H_ */ 1570#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
index 3226946bf3d4..c92a3bdee229 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -80,7 +80,15 @@ enum i40e_virtchnl_ops {
80 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, 80 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
81 I40E_VIRTCHNL_OP_GET_STATS = 15, 81 I40E_VIRTCHNL_OP_GET_STATS = 15,
82 I40E_VIRTCHNL_OP_FCOE = 16, 82 I40E_VIRTCHNL_OP_FCOE = 16,
83 I40E_VIRTCHNL_OP_EVENT = 17, 83 I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
84 I40E_VIRTCHNL_OP_IWARP = 20,
85 I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21,
86 I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22,
87 I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
88 I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
89 I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
90 I40E_VIRTCHNL_OP_SET_RSS_HENA = 26,
91
84}; 92};
85 93
86/* Virtual channel message descriptor. This overlays the admin queue 94/* Virtual channel message descriptor. This overlays the admin queue
@@ -154,6 +162,7 @@ struct i40e_virtchnl_vsi_resource {
154#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 162#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
155#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 163#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
156#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000 164#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
165#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
157 166
158struct i40e_virtchnl_vf_resource { 167struct i40e_virtchnl_vf_resource {
159 u16 num_vsis; 168 u16 num_vsis;
@@ -162,8 +171,8 @@ struct i40e_virtchnl_vf_resource {
162 u16 max_mtu; 171 u16 max_mtu;
163 172
164 u32 vf_offload_flags; 173 u32 vf_offload_flags;
165 u32 max_fcoe_contexts; 174 u32 rss_key_size;
166 u32 max_fcoe_filters; 175 u32 rss_lut_size;
167 176
168 struct i40e_virtchnl_vsi_resource vsi_res[1]; 177 struct i40e_virtchnl_vsi_resource vsi_res[1];
169}; 178};
@@ -322,6 +331,39 @@ struct i40e_virtchnl_promisc_info {
322 * PF replies with struct i40e_eth_stats in an external buffer. 331 * PF replies with struct i40e_eth_stats in an external buffer.
323 */ 332 */
324 333
334/* I40E_VIRTCHNL_OP_CONFIG_RSS_KEY
335 * I40E_VIRTCHNL_OP_CONFIG_RSS_LUT
336 * VF sends these messages to configure RSS. Only supported if both PF
337 * and VF drivers set the I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
338 * configuration negotiation. If this is the case, then the RSS fields in
339 * the VF resource struct are valid.
340 * Both the key and LUT are initialized to 0 by the PF, meaning that
341 * RSS is effectively disabled until set up by the VF.
342 */
343struct i40e_virtchnl_rss_key {
344 u16 vsi_id;
345 u16 key_len;
346 u8 key[1]; /* RSS hash key, packed bytes */
347};
348
349struct i40e_virtchnl_rss_lut {
350 u16 vsi_id;
351 u16 lut_entries;
 352 u8 lut[1]; /* RSS lookup table */
353};
354
355/* I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS
356 * I40E_VIRTCHNL_OP_SET_RSS_HENA
357 * VF sends these messages to get and set the hash filter enable bits for RSS.
358 * By default, the PF sets these to all possible traffic types that the
359 * hardware supports. The VF can query this value if it wants to change the
360 * traffic types that are hashed by the hardware.
361 * Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h
362 */
363struct i40e_virtchnl_rss_hena {
364 u64 hena;
365};
366
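The new RSS structures follow the virtchnl convention of a one-element trailing array, so a message carrying a key of key_len bytes is sized as the struct plus (key_len - 1) extra bytes. A hedged sketch of how a VF might assemble the key message; the allocation and eventual send are placeholders, only the sizing idiom matters here:

#include <linux/slab.h>
#include <linux/string.h>
#include "i40e_virtchnl.h"

static struct i40e_virtchnl_rss_key *
demo_build_rss_key_msg(u16 vsi_id, const u8 *key, u16 key_len, u16 *msg_len)
{
	struct i40e_virtchnl_rss_key *msg;
	u16 len = sizeof(*msg) + key_len - 1;

	msg = kzalloc(len, GFP_KERNEL);
	if (!msg)
		return NULL;

	msg->vsi_id = vsi_id;
	msg->key_len = key_len;
	memcpy(msg->key, key, key_len);
	*msg_len = len;

	/* the caller would send this buffer with
	 * I40E_VIRTCHNL_OP_CONFIG_RSS_KEY and kfree() it afterwards
	 */
	return msg;
}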
325/* I40E_VIRTCHNL_OP_EVENT 367/* I40E_VIRTCHNL_OP_EVENT
326 * PF sends this message to inform the VF driver of events that may affect it. 368 * PF sends this message to inform the VF driver of events that may affect it.
327 * No direct response is expected from the VF, though it may generate other 369 * No direct response is expected from the VF, though it may generate other
@@ -348,6 +390,37 @@ struct i40e_virtchnl_pf_event {
348 int severity; 390 int severity;
349}; 391};
350 392
393/* I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
394 * VF uses this message to request PF to map IWARP vectors to IWARP queues.
395 * The request for this originates from the VF IWARP driver through
396 * a client interface between VF LAN and VF IWARP driver.
397 * A vector could have an AEQ and CEQ attached to it although
398 * there is a single AEQ per VF IWARP instance in which case
399 * most vectors will have an INVALID_IDX for aeq and valid idx for ceq.
400 * There will never be a case where there will be multiple CEQs attached
401 * to a single vector.
402 * PF configures interrupt mapping and returns status.
403 */
404
405/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
406 * In order for us to keep the interface simple, SW will define a
407 * unique type value for AEQ.
408*/
409#define I40E_QUEUE_TYPE_PE_AEQ 0x80
410#define I40E_QUEUE_INVALID_IDX 0xFFFF
411
412struct i40e_virtchnl_iwarp_qv_info {
413 u32 v_idx; /* msix_vector */
414 u16 ceq_idx;
415 u16 aeq_idx;
416 u8 itr_idx;
417};
418
419struct i40e_virtchnl_iwarp_qvlist_info {
420 u32 num_vectors;
421 struct i40e_virtchnl_iwarp_qv_info qv_info[1];
422};
423
351/* VF reset states - these are written into the RSTAT register: 424/* VF reset states - these are written into the RSTAT register:
352 * I40E_VFGEN_RSTAT1 on the PF 425 * I40E_VFGEN_RSTAT1 on the PF
353 * I40E_VFGEN_RSTAT on the VF 426 * I40E_VFGEN_RSTAT on the VF
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index acd2693a4e97..30f8cbe6b54b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -63,7 +63,7 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
63} 63}
64 64
65/** 65/**
66 * i40e_vc_notify_link_state 66 * i40e_vc_notify_vf_link_state
67 * @vf: pointer to the VF structure 67 * @vf: pointer to the VF structure
68 * 68 *
69 * send a link status message to a single VF 69 * send a link status message to a single VF
@@ -352,6 +352,136 @@ irq_list_done:
352} 352}
353 353
354/** 354/**
355 * i40e_release_iwarp_qvlist
356 * @vf: pointer to the VF.
357 *
358 **/
359static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
360{
361 struct i40e_pf *pf = vf->pf;
362 struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
363 u32 msix_vf;
364 u32 i;
365
366 if (!vf->qvlist_info)
367 return;
368
369 msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
370 for (i = 0; i < qvlist_info->num_vectors; i++) {
371 struct i40e_virtchnl_iwarp_qv_info *qv_info;
372 u32 next_q_index, next_q_type;
373 struct i40e_hw *hw = &pf->hw;
374 u32 v_idx, reg_idx, reg;
375
376 qv_info = &qvlist_info->qv_info[i];
377 if (!qv_info)
378 continue;
379 v_idx = qv_info->v_idx;
380 if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
381 /* Figure out the queue after CEQ and make that the
382 * first queue.
383 */
384 reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
385 reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
386 next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
387 >> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
388 next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
389 >> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;
390
391 reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
392 reg = (next_q_index &
393 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
394 (next_q_type <<
395 I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
396
397 wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
398 }
399 }
400 kfree(vf->qvlist_info);
401 vf->qvlist_info = NULL;
402}
403
404/**
405 * i40e_config_iwarp_qvlist
406 * @vf: pointer to the VF info
407 * @qvlist_info: queue and vector list
408 *
409 * Return 0 on success or < 0 on error
410 **/
411static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
412 struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info)
413{
414 struct i40e_pf *pf = vf->pf;
415 struct i40e_hw *hw = &pf->hw;
416 struct i40e_virtchnl_iwarp_qv_info *qv_info;
417 u32 v_idx, i, reg_idx, reg;
418 u32 next_q_idx, next_q_type;
419 u32 msix_vf, size;
420
421 size = sizeof(struct i40e_virtchnl_iwarp_qvlist_info) +
422 (sizeof(struct i40e_virtchnl_iwarp_qv_info) *
423 (qvlist_info->num_vectors - 1));
424 vf->qvlist_info = kzalloc(size, GFP_KERNEL);
425 vf->qvlist_info->num_vectors = qvlist_info->num_vectors;
426
427 msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
428 for (i = 0; i < qvlist_info->num_vectors; i++) {
429 qv_info = &qvlist_info->qv_info[i];
430 if (!qv_info)
431 continue;
432 v_idx = qv_info->v_idx;
433
434 /* Validate vector id belongs to this vf */
435 if (!i40e_vc_isvalid_vector_id(vf, v_idx))
436 goto err;
437
438 vf->qvlist_info->qv_info[i] = *qv_info;
439
440 reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
441 /* We might be sharing the interrupt, so get the first queue
442 * index and type, push it down the list by adding the new
443 * queue on top. Also link it with the new queue in CEQCTL.
444 */
445 reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
446 next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
447 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
448 next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
449 I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
450
451 if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
452 reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
453 reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
454 (v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
455 (qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
456 (next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
457 (next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
458 wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);
459
460 reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
461 reg = (qv_info->ceq_idx &
462 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
463 (I40E_QUEUE_TYPE_PE_CEQ <<
464 I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
465 wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
466 }
467
468 if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
469 reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
470 (v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
471 (qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));
472
473 wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
474 }
475 }
476
477 return 0;
478err:
479 kfree(vf->qvlist_info);
480 vf->qvlist_info = NULL;
481 return -EINVAL;
482}
483
484/**
355 * i40e_config_vsi_tx_queue 485 * i40e_config_vsi_tx_queue
356 * @vf: pointer to the VF info 486 * @vf: pointer to the VF info
357 * @vsi_id: id of VSI as provided by the FW 487 * @vsi_id: id of VSI as provided by the FW
@@ -787,9 +917,9 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
787{ 917{
788 struct i40e_pf *pf = vf->pf; 918 struct i40e_pf *pf = vf->pf;
789 struct i40e_hw *hw = &pf->hw; 919 struct i40e_hw *hw = &pf->hw;
920 u32 reg, reg_idx, bit_idx;
790 bool rsd = false; 921 bool rsd = false;
791 int i; 922 int i;
792 u32 reg;
793 923
794 if (test_and_set_bit(__I40E_VF_DISABLE, &pf->state)) 924 if (test_and_set_bit(__I40E_VF_DISABLE, &pf->state))
795 return; 925 return;
@@ -807,6 +937,11 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
807 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); 937 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
808 i40e_flush(hw); 938 i40e_flush(hw);
809 } 939 }
940 /* clear the VFLR bit in GLGEN_VFLRSTAT */
941 reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
942 bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
943 wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
944 i40e_flush(hw);
810 945
811 if (i40e_quiesce_vf_pci(vf)) 946 if (i40e_quiesce_vf_pci(vf))
812 dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n", 947 dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
@@ -850,12 +985,15 @@ complete_reset:
850 /* reallocate VF resources to reset the VSI state */ 985 /* reallocate VF resources to reset the VSI state */
851 i40e_free_vf_res(vf); 986 i40e_free_vf_res(vf);
852 if (!i40e_alloc_vf_res(vf)) { 987 if (!i40e_alloc_vf_res(vf)) {
988 int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
853 i40e_enable_vf_mappings(vf); 989 i40e_enable_vf_mappings(vf);
854 set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); 990 set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
855 clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states); 991 clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
992 i40e_notify_client_of_vf_reset(pf, abs_vf_id);
856 } 993 }
857 /* tell the VF the reset is done */ 994 /* tell the VF the reset is done */
858 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE); 995 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
996
859 i40e_flush(hw); 997 i40e_flush(hw);
860 clear_bit(__I40E_VF_DISABLE, &pf->state); 998 clear_bit(__I40E_VF_DISABLE, &pf->state);
861} 999}
@@ -877,11 +1015,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
877 while (test_and_set_bit(__I40E_VF_DISABLE, &pf->state)) 1015 while (test_and_set_bit(__I40E_VF_DISABLE, &pf->state))
878 usleep_range(1000, 2000); 1016 usleep_range(1000, 2000);
879 1017
880 for (i = 0; i < pf->num_alloc_vfs; i++) 1018 i40e_notify_client_of_vf_enable(pf, 0);
881 if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
882 i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
883 false);
884
885 for (i = 0; i < pf->num_alloc_vfs; i++) 1019 for (i = 0; i < pf->num_alloc_vfs; i++)
886 if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states)) 1020 if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
887 i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx], 1021 i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
@@ -953,6 +1087,7 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
953 goto err_iov; 1087 goto err_iov;
954 } 1088 }
955 } 1089 }
1090 i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);
956 /* allocate memory */ 1091 /* allocate memory */
957 vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL); 1092 vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
958 if (!vfs) { 1093 if (!vfs) {
@@ -1098,8 +1233,8 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
1098 /* single place to detect unsuccessful return values */ 1233 /* single place to detect unsuccessful return values */
1099 if (v_retval) { 1234 if (v_retval) {
1100 vf->num_invalid_msgs++; 1235 vf->num_invalid_msgs++;
1101 dev_err(&pf->pdev->dev, "VF %d failed opcode %d, error: %d\n", 1236 dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
1102 vf->vf_id, v_opcode, v_retval); 1237 vf->vf_id, v_opcode, v_retval);
1103 if (vf->num_invalid_msgs > 1238 if (vf->num_invalid_msgs >
1104 I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) { 1239 I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
1105 dev_err(&pf->pdev->dev, 1240 dev_err(&pf->pdev->dev,
@@ -1117,9 +1252,9 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
1117 aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, 1252 aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
1118 msg, msglen, NULL); 1253 msg, msglen, NULL);
1119 if (aq_ret) { 1254 if (aq_ret) {
1120 dev_err(&pf->pdev->dev, 1255 dev_info(&pf->pdev->dev,
1121 "Unable to send the message to VF %d aq_err %d\n", 1256 "Unable to send the message to VF %d aq_err %d\n",
1122 vf->vf_id, pf->hw.aq.asq_last_status); 1257 vf->vf_id, pf->hw.aq.asq_last_status);
1123 return -EIO; 1258 return -EIO;
1124 } 1259 }
1125 1260
@@ -1177,8 +1312,8 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
1177 struct i40e_pf *pf = vf->pf; 1312 struct i40e_pf *pf = vf->pf;
1178 i40e_status aq_ret = 0; 1313 i40e_status aq_ret = 0;
1179 struct i40e_vsi *vsi; 1314 struct i40e_vsi *vsi;
1180 int i = 0, len = 0;
1181 int num_vsis = 1; 1315 int num_vsis = 1;
1316 int len = 0;
1182 int ret; 1317 int ret;
1183 1318
1184 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { 1319 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
@@ -1206,6 +1341,13 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
1206 vsi = pf->vsi[vf->lan_vsi_idx]; 1341 vsi = pf->vsi[vf->lan_vsi_idx];
1207 if (!vsi->info.pvid) 1342 if (!vsi->info.pvid)
1208 vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN; 1343 vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
1344
1345 if (i40e_vf_client_capable(pf, vf->vf_id, I40E_CLIENT_IWARP) &&
1346 (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_IWARP)) {
1347 vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_IWARP;
1348 set_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states);
1349 }
1350
1209 if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { 1351 if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
1210 if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ) 1352 if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)
1211 vfres->vf_offload_flags |= 1353 vfres->vf_offload_flags |=
@@ -1220,8 +1362,16 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
1220 I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2; 1362 I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
1221 } 1363 }
1222 1364
1223 if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) 1365 if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
1366 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
1367 dev_err(&pf->pdev->dev,
1368 "VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
1369 vf->vf_id);
1370 ret = I40E_ERR_PARAM;
1371 goto err;
1372 }
1224 vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING; 1373 vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING;
1374 }
1225 1375
1226 if (pf->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) { 1376 if (pf->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) {
1227 if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) 1377 if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
@@ -1233,15 +1383,14 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
1233 vfres->num_queue_pairs = vf->num_queue_pairs; 1383 vfres->num_queue_pairs = vf->num_queue_pairs;
1234 vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf; 1384 vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
1235 if (vf->lan_vsi_idx) { 1385 if (vf->lan_vsi_idx) {
1236 vfres->vsi_res[i].vsi_id = vf->lan_vsi_id; 1386 vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
1237 vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV; 1387 vfres->vsi_res[0].vsi_type = I40E_VSI_SRIOV;
1238 vfres->vsi_res[i].num_queue_pairs = vsi->alloc_queue_pairs; 1388 vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
1239 /* VFs only use TC 0 */ 1389 /* VFs only use TC 0 */
1240 vfres->vsi_res[i].qset_handle 1390 vfres->vsi_res[0].qset_handle
1241 = le16_to_cpu(vsi->info.qs_handle[0]); 1391 = le16_to_cpu(vsi->info.qs_handle[0]);
1242 ether_addr_copy(vfres->vsi_res[i].default_mac_addr, 1392 ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
1243 vf->default_lan_addr.addr); 1393 vf->default_lan_addr.addr);
1244 i++;
1245 } 1394 }
1246 set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); 1395 set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
1247 1396
@@ -1827,6 +1976,72 @@ error_param:
1827} 1976}
1828 1977
1829/** 1978/**
1979 * i40e_vc_iwarp_msg
1980 * @vf: pointer to the VF info
1981 * @msg: pointer to the msg buffer
1982 * @msglen: msg length
1983 *
1984 * called from the VF for the iwarp msgs
1985 **/
1986static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1987{
1988 struct i40e_pf *pf = vf->pf;
1989 int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
1990 i40e_status aq_ret = 0;
1991
1992 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
1993 !test_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states)) {
1994 aq_ret = I40E_ERR_PARAM;
1995 goto error_param;
1996 }
1997
1998 i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
1999 msg, msglen);
2000
2001error_param:
2002 /* send the response to the VF */
2003 return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_IWARP,
2004 aq_ret);
2005}
2006
2007/**
2008 * i40e_vc_iwarp_qvmap_msg
2009 * @vf: pointer to the VF info
2010 * @msg: pointer to the msg buffer
2011 * @msglen: msg length
2012 * @config: config qvmap or release it
2013 *
2014 * called from the VF for the iwarp msgs
2015 **/
2016static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen,
2017 bool config)
2018{
2019 struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info =
2020 (struct i40e_virtchnl_iwarp_qvlist_info *)msg;
2021 i40e_status aq_ret = 0;
2022
2023 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
2024 !test_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states)) {
2025 aq_ret = I40E_ERR_PARAM;
2026 goto error_param;
2027 }
2028
2029 if (config) {
2030 if (i40e_config_iwarp_qvlist(vf, qvlist_info))
2031 aq_ret = I40E_ERR_PARAM;
2032 } else {
2033 i40e_release_iwarp_qvlist(vf);
2034 }
2035
2036error_param:
2037 /* send the response to the VF */
2038 return i40e_vc_send_resp_to_vf(vf,
2039 config ? I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP :
2040 I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP,
2041 aq_ret);
2042}
2043
2044/**
1830 * i40e_vc_validate_vf_msg 2045 * i40e_vc_validate_vf_msg
1831 * @vf: pointer to the VF info 2046 * @vf: pointer to the VF info
1832 * @msg: pointer to the msg buffer 2047 * @msg: pointer to the msg buffer
@@ -1921,6 +2136,32 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
1921 case I40E_VIRTCHNL_OP_GET_STATS: 2136 case I40E_VIRTCHNL_OP_GET_STATS:
1922 valid_len = sizeof(struct i40e_virtchnl_queue_select); 2137 valid_len = sizeof(struct i40e_virtchnl_queue_select);
1923 break; 2138 break;
2139 case I40E_VIRTCHNL_OP_IWARP:
2140 /* These messages are opaque to us and will be validated in
2141 * the RDMA client code. We just need to check for nonzero
2142 * length. The firmware will enforce max length restrictions.
2143 */
2144 if (msglen)
2145 valid_len = msglen;
2146 else
2147 err_msg_format = true;
2148 break;
2149 case I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
2150 valid_len = 0;
2151 break;
2152 case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
2153 valid_len = sizeof(struct i40e_virtchnl_iwarp_qvlist_info);
2154 if (msglen >= valid_len) {
2155 struct i40e_virtchnl_iwarp_qvlist_info *qv =
2156 (struct i40e_virtchnl_iwarp_qvlist_info *)msg;
2157 if (qv->num_vectors == 0) {
2158 err_msg_format = true;
2159 break;
2160 }
2161 valid_len += ((qv->num_vectors - 1) *
2162 sizeof(struct i40e_virtchnl_iwarp_qv_info));
2163 }
2164 break;
1924 /* These are always errors coming from the VF. */ 2165 /* These are always errors coming from the VF. */
1925 case I40E_VIRTCHNL_OP_EVENT: 2166 case I40E_VIRTCHNL_OP_EVENT:
1926 case I40E_VIRTCHNL_OP_UNKNOWN: 2167 case I40E_VIRTCHNL_OP_UNKNOWN:
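The IWARP_IRQ_MAP case above shows the general recipe for validating a variable-length virtchnl message: make sure the buffer covers at least the fixed header before reading the element count, reject an empty list, and only then extend the expected length by the trailing array. A condensed sketch of the same checks with hypothetical types:

#include <linux/types.h>

struct demo_var_msg {
	u32 num_elems;
	u32 elems[1];		/* trailing variable-length array */
};

static bool demo_msg_len_ok(const void *msg, u16 msglen)
{
	const struct demo_var_msg *m = msg;
	size_t valid_len = sizeof(*m);

	if (msglen < valid_len)
		return false;	/* too short to read num_elems safely */
	if (!m->num_elems)
		return false;	/* an empty list is malformed */

	valid_len += (size_t)(m->num_elems - 1) * sizeof(m->elems[0]);
	return msglen == valid_len;
}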
@@ -2010,6 +2251,15 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
2010 case I40E_VIRTCHNL_OP_GET_STATS: 2251 case I40E_VIRTCHNL_OP_GET_STATS:
2011 ret = i40e_vc_get_stats_msg(vf, msg, msglen); 2252 ret = i40e_vc_get_stats_msg(vf, msg, msglen);
2012 break; 2253 break;
2254 case I40E_VIRTCHNL_OP_IWARP:
2255 ret = i40e_vc_iwarp_msg(vf, msg, msglen);
2256 break;
2257 case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
2258 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, true);
2259 break;
2260 case I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
2261 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false);
2262 break;
2013 case I40E_VIRTCHNL_OP_UNKNOWN: 2263 case I40E_VIRTCHNL_OP_UNKNOWN:
2014 default: 2264 default:
2015 dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n", 2265 dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
@@ -2055,13 +2305,9 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
2055 /* read GLGEN_VFLRSTAT register to find out the flr VFs */ 2305 /* read GLGEN_VFLRSTAT register to find out the flr VFs */
2056 vf = &pf->vf[vf_id]; 2306 vf = &pf->vf[vf_id];
2057 reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx)); 2307 reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
2058 if (reg & BIT(bit_idx)) { 2308 if (reg & BIT(bit_idx))
2059 /* clear the bit in GLGEN_VFLRSTAT */ 2309 /* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
2060 wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); 2310 i40e_reset_vf(vf, true);
2061
2062 if (!test_bit(__I40E_DOWN, &pf->state))
2063 i40e_reset_vf(vf, true);
2064 }
2065 } 2311 }
2066 2312
2067 return 0; 2313 return 0;
@@ -2525,3 +2771,45 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
2525out: 2771out:
2526 return ret; 2772 return ret;
2527} 2773}
2774
2775/**
2776 * i40e_ndo_set_vf_trust
2777 * @netdev: network interface device structure of the pf
2778 * @vf_id: VF identifier
2779 * @setting: trust setting
2780 *
2781 * Enable or disable VF trust setting
2782 **/
2783int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
2784{
2785 struct i40e_netdev_priv *np = netdev_priv(netdev);
2786 struct i40e_pf *pf = np->vsi->back;
2787 struct i40e_vf *vf;
2788 int ret = 0;
2789
2790 /* validate the request */
2791 if (vf_id >= pf->num_alloc_vfs) {
2792 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
2793 return -EINVAL;
2794 }
2795
2796 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
2797 dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
2798 return -EINVAL;
2799 }
2800
2801 vf = &pf->vf[vf_id];
2802
2803 if (!vf)
2804 return -EINVAL;
2805 if (setting == vf->trusted)
2806 goto out;
2807
2808 vf->trusted = setting;
2809 i40e_vc_notify_vf_reset(vf);
2810 i40e_reset_vf(vf, false);
2811 dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
2812 vf_id, setting ? "" : "un");
2813out:
2814 return ret;
2815}
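The trust setting added here is reached through the standard VF configuration path on the PF: with a recent enough iproute2 an administrator can toggle it per VF, for example "ip link set dev <pf-netdev> vf 0 trust on" or "... trust off" (the device name is a placeholder). As the handler above makes explicit, flipping the setting triggers a reset of that VF.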
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index e74642a0c42e..838cbd2299a4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -58,6 +58,7 @@ enum i40e_queue_ctrl {
58enum i40e_vf_states { 58enum i40e_vf_states {
59 I40E_VF_STAT_INIT = 0, 59 I40E_VF_STAT_INIT = 0,
60 I40E_VF_STAT_ACTIVE, 60 I40E_VF_STAT_ACTIVE,
61 I40E_VF_STAT_IWARPENA,
61 I40E_VF_STAT_FCOEENA, 62 I40E_VF_STAT_FCOEENA,
62 I40E_VF_STAT_DISABLED, 63 I40E_VF_STAT_DISABLED,
63}; 64};
@@ -66,6 +67,7 @@ enum i40e_vf_states {
66enum i40e_vf_capabilities { 67enum i40e_vf_capabilities {
67 I40E_VIRTCHNL_VF_CAP_PRIVILEGE = 0, 68 I40E_VIRTCHNL_VF_CAP_PRIVILEGE = 0,
68 I40E_VIRTCHNL_VF_CAP_L2, 69 I40E_VIRTCHNL_VF_CAP_L2,
70 I40E_VIRTCHNL_VF_CAP_IWARP,
69}; 71};
70 72
71/* VF information structure */ 73/* VF information structure */
@@ -86,6 +88,7 @@ struct i40e_vf {
86 struct i40e_virtchnl_ether_addr default_fcoe_addr; 88 struct i40e_virtchnl_ether_addr default_fcoe_addr;
87 u16 port_vlan_id; 89 u16 port_vlan_id;
88 bool pf_set_mac; /* The VMM admin set the VF MAC address */ 90 bool pf_set_mac; /* The VMM admin set the VF MAC address */
91 bool trusted;
89 92
90 /* VSI indices - actual VSI pointers are maintained in the PF structure 93 /* VSI indices - actual VSI pointers are maintained in the PF structure
91 * When assigned, these will be non-zero, because VSI 0 is always 94 * When assigned, these will be non-zero, because VSI 0 is always
@@ -106,6 +109,8 @@ struct i40e_vf {
106 bool link_forced; 109 bool link_forced;
107 bool link_up; /* only valid if VF link is forced */ 110 bool link_up; /* only valid if VF link is forced */
108 bool spoofchk; 111 bool spoofchk;
112 /* RDMA Client */
113 struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info;
109}; 114};
110 115
111void i40e_free_vfs(struct i40e_pf *pf); 116void i40e_free_vfs(struct i40e_pf *pf);
@@ -123,6 +128,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
123 int vf_id, u16 vlan_id, u8 qos); 128 int vf_id, u16 vlan_id, u8 qos);
124int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, 129int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
125 int max_tx_rate); 130 int max_tx_rate);
131int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting);
126int i40e_ndo_get_vf_config(struct net_device *netdev, 132int i40e_ndo_get_vf_config(struct net_device *netdev,
127 int vf_id, struct ifla_vf_info *ivi); 133 int vf_id, struct ifla_vf_info *ivi);
128int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link); 134int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
index a3eae5d9a2bd..1f9b3b5d946d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
@@ -97,7 +97,6 @@ struct i40e_adminq_info {
97 u32 fw_build; /* firmware build number */ 97 u32 fw_build; /* firmware build number */
98 u16 api_maj_ver; /* api major version */ 98 u16 api_maj_ver; /* api major version */
99 u16 api_min_ver; /* api minor version */ 99 u16 api_min_ver; /* api minor version */
100 bool nvm_release_on_done;
101 100
102 struct mutex asq_mutex; /* Send queue lock */ 101 struct mutex asq_mutex; /* Send queue lock */
103 struct mutex arq_mutex; /* Receive queue lock */ 102 struct mutex arq_mutex; /* Receive queue lock */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 771ac6ad8cda..4db0c0326185 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -58,6 +58,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
58 case I40E_DEV_ID_SFP_X722: 58 case I40E_DEV_ID_SFP_X722:
59 case I40E_DEV_ID_1G_BASE_T_X722: 59 case I40E_DEV_ID_1G_BASE_T_X722:
60 case I40E_DEV_ID_10G_BASE_T_X722: 60 case I40E_DEV_ID_10G_BASE_T_X722:
61 case I40E_DEV_ID_SFP_I_X722:
61 hw->mac.type = I40E_MAC_X722; 62 hw->mac.type = I40E_MAC_X722;
62 break; 63 break;
63 case I40E_DEV_ID_X722_VF: 64 case I40E_DEV_ID_X722_VF:
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
index ca8b58c3d1f5..70235706915e 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
@@ -44,6 +44,7 @@
44#define I40E_DEV_ID_SFP_X722 0x37D0 44#define I40E_DEV_ID_SFP_X722 0x37D0
45#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 45#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
46#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 46#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
47#define I40E_DEV_ID_SFP_I_X722 0x37D3
47#define I40E_DEV_ID_X722_VF 0x37CD 48#define I40E_DEV_ID_X722_VF 0x37CD
48#define I40E_DEV_ID_X722_VF_HV 0x37D9 49#define I40E_DEV_ID_X722_VF_HV 0x37D9
49 50
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index ebcc25c05796..0c912a4999db 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -155,19 +155,21 @@ u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw)
155 155
156/** 156/**
157 * i40e_clean_tx_irq - Reclaim resources after transmit completes 157 * i40e_clean_tx_irq - Reclaim resources after transmit completes
158 * @tx_ring: tx ring to clean 158 * @vsi: the VSI we care about
159 * @budget: how many cleans we're allowed 159 * @tx_ring: Tx ring to clean
160 * @napi_budget: Used to determine if we are in netpoll
160 * 161 *
161 * Returns true if there's any budget left (e.g. the clean is finished) 162 * Returns true if there's any budget left (e.g. the clean is finished)
162 **/ 163 **/
163static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) 164static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
165 struct i40e_ring *tx_ring, int napi_budget)
164{ 166{
165 u16 i = tx_ring->next_to_clean; 167 u16 i = tx_ring->next_to_clean;
166 struct i40e_tx_buffer *tx_buf; 168 struct i40e_tx_buffer *tx_buf;
167 struct i40e_tx_desc *tx_head; 169 struct i40e_tx_desc *tx_head;
168 struct i40e_tx_desc *tx_desc; 170 struct i40e_tx_desc *tx_desc;
169 unsigned int total_packets = 0; 171 unsigned int total_bytes = 0, total_packets = 0;
170 unsigned int total_bytes = 0; 172 unsigned int budget = vsi->work_limit;
171 173
172 tx_buf = &tx_ring->tx_bi[i]; 174 tx_buf = &tx_ring->tx_bi[i];
173 tx_desc = I40E_TX_DESC(tx_ring, i); 175 tx_desc = I40E_TX_DESC(tx_ring, i);
@@ -197,7 +199,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
197 total_packets += tx_buf->gso_segs; 199 total_packets += tx_buf->gso_segs;
198 200
199 /* free the skb */ 201 /* free the skb */
200 dev_kfree_skb_any(tx_buf->skb); 202 napi_consume_skb(tx_buf->skb, napi_budget);
201 203
202 /* unmap skb header data */ 204 /* unmap skb header data */
203 dma_unmap_single(tx_ring->dev, 205 dma_unmap_single(tx_ring->dev,
@@ -267,7 +269,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
267 269
268 if (budget && 270 if (budget &&
269 ((j / (WB_STRIDE + 1)) == 0) && (j > 0) && 271 ((j / (WB_STRIDE + 1)) == 0) && (j > 0) &&
270 !test_bit(__I40E_DOWN, &tx_ring->vsi->state) && 272 !test_bit(__I40E_DOWN, &vsi->state) &&
271 (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) 273 (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
272 tx_ring->arm_wb = true; 274 tx_ring->arm_wb = true;
273 } 275 }
@@ -285,7 +287,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
285 smp_mb(); 287 smp_mb();
286 if (__netif_subqueue_stopped(tx_ring->netdev, 288 if (__netif_subqueue_stopped(tx_ring->netdev,
287 tx_ring->queue_index) && 289 tx_ring->queue_index) &&
288 !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) { 290 !test_bit(__I40E_DOWN, &vsi->state)) {
289 netif_wake_subqueue(tx_ring->netdev, 291 netif_wake_subqueue(tx_ring->netdev,
290 tx_ring->queue_index); 292 tx_ring->queue_index);
291 ++tx_ring->tx_stats.restart_queue; 293 ++tx_ring->tx_stats.restart_queue;
@@ -1158,7 +1160,9 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget)
1158 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) 1160 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
1159 : 0; 1161 : 0;
1160#ifdef I40E_FCOE 1162#ifdef I40E_FCOE
1161 if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { 1163 if (unlikely(
1164 i40e_rx_is_fcoe(rx_ptype) &&
1165 !i40e_fcoe_handle_offload(rx_ring, rx_desc, skb))) {
1162 dev_kfree_skb_any(skb); 1166 dev_kfree_skb_any(skb);
1163 continue; 1167 continue;
1164 } 1168 }
@@ -1411,9 +1415,11 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
1411 * budget and be more aggressive about cleaning up the Tx descriptors. 1415 * budget and be more aggressive about cleaning up the Tx descriptors.
1412 */ 1416 */
1413 i40e_for_each_ring(ring, q_vector->tx) { 1417 i40e_for_each_ring(ring, q_vector->tx) {
1414 clean_complete = clean_complete && 1418 if (!i40e_clean_tx_irq(vsi, ring, budget)) {
1415 i40e_clean_tx_irq(ring, vsi->work_limit); 1419 clean_complete = false;
1416 arm_wb = arm_wb || ring->arm_wb; 1420 continue;
1421 }
1422 arm_wb |= ring->arm_wb;
1417 ring->arm_wb = false; 1423 ring->arm_wb = false;
1418 } 1424 }
1419 1425
@@ -1435,8 +1441,9 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
1435 cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring); 1441 cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
1436 1442
1437 work_done += cleaned; 1443 work_done += cleaned;
1438 /* if we didn't clean as many as budgeted, we must be done */ 1444 /* if we clean as many as budgeted, we must not be done */
1439 clean_complete = clean_complete && (budget_per_ring > cleaned); 1445 if (cleaned >= budget_per_ring)
1446 clean_complete = false;
1440 } 1447 }
1441 1448
1442 /* If work not completed, return budget and polling will return */ 1449 /* If work not completed, return budget and polling will return */
@@ -1514,15 +1521,13 @@ out:
1514 1521
1515/** 1522/**
1516 * i40e_tso - set up the tso context descriptor 1523 * i40e_tso - set up the tso context descriptor
1517 * @tx_ring: ptr to the ring to send
1518 * @skb: ptr to the skb we're sending 1524 * @skb: ptr to the skb we're sending
1519 * @hdr_len: ptr to the size of the packet header 1525 * @hdr_len: ptr to the size of the packet header
1520 * @cd_type_cmd_tso_mss: Quad Word 1 1526 * @cd_type_cmd_tso_mss: Quad Word 1
1521 * 1527 *
1522 * Returns 0 if no TSO can happen, 1 if tso is going, or error 1528 * Returns 0 if no TSO can happen, 1 if tso is going, or error
1523 **/ 1529 **/
1524static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, 1530static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
1525 u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
1526{ 1531{
1527 u64 cd_cmd, cd_tso_len, cd_mss; 1532 u64 cd_cmd, cd_tso_len, cd_mss;
1528 union { 1533 union {
@@ -1566,9 +1571,8 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
1566 l4_offset = l4.hdr - skb->data; 1571 l4_offset = l4.hdr - skb->data;
1567 1572
1568 /* remove payload length from outer checksum */ 1573 /* remove payload length from outer checksum */
1569 paylen = (__force u16)l4.udp->check; 1574 paylen = skb->len - l4_offset;
1570 paylen += ntohs(1) * (u16)~(skb->len - l4_offset); 1575 csum_replace_by_diff(&l4.udp->check, htonl(paylen));
1571 l4.udp->check = ~csum_fold((__force __wsum)paylen);
1572 } 1576 }
1573 1577
1574 /* reset pointers to inner headers */ 1578 /* reset pointers to inner headers */
@@ -1588,9 +1592,8 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
1588 l4_offset = l4.hdr - skb->data; 1592 l4_offset = l4.hdr - skb->data;
1589 1593
1590 /* remove payload length from inner checksum */ 1594 /* remove payload length from inner checksum */
1591 paylen = (__force u16)l4.tcp->check; 1595 paylen = skb->len - l4_offset;
1592 paylen += ntohs(1) * (u16)~(skb->len - l4_offset); 1596 csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
1593 l4.tcp->check = ~csum_fold((__force __wsum)paylen);
1594 1597
1595 /* compute length of segmentation header */ 1598 /* compute length of segmentation header */
1596 *hdr_len = (l4.tcp->doff * 4) + l4_offset; 1599 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
@@ -1630,7 +1633,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
1630 unsigned char *hdr; 1633 unsigned char *hdr;
1631 } l4; 1634 } l4;
1632 unsigned char *exthdr; 1635 unsigned char *exthdr;
1633 u32 offset, cmd = 0, tunnel = 0; 1636 u32 offset, cmd = 0;
1634 __be16 frag_off; 1637 __be16 frag_off;
1635 u8 l4_proto = 0; 1638 u8 l4_proto = 0;
1636 1639
@@ -1644,6 +1647,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
1644 offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; 1647 offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
1645 1648
1646 if (skb->encapsulation) { 1649 if (skb->encapsulation) {
1650 u32 tunnel = 0;
1647 /* define outer network header type */ 1651 /* define outer network header type */
1648 if (*tx_flags & I40E_TX_FLAGS_IPV4) { 1652 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
1649 tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ? 1653 tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
@@ -1936,6 +1940,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
1936 tx_bi = first; 1940 tx_bi = first;
1937 1941
1938 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { 1942 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
1943 unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
1944
1939 if (dma_mapping_error(tx_ring->dev, dma)) 1945 if (dma_mapping_error(tx_ring->dev, dma))
1940 goto dma_error; 1946 goto dma_error;
1941 1947
@@ -1943,12 +1949,14 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
1943 dma_unmap_len_set(tx_bi, len, size); 1949 dma_unmap_len_set(tx_bi, len, size);
1944 dma_unmap_addr_set(tx_bi, dma, dma); 1950 dma_unmap_addr_set(tx_bi, dma, dma);
1945 1951
1952 /* align size to end of page */
1953 max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
1946 tx_desc->buffer_addr = cpu_to_le64(dma); 1954 tx_desc->buffer_addr = cpu_to_le64(dma);
1947 1955
1948 while (unlikely(size > I40E_MAX_DATA_PER_TXD)) { 1956 while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
1949 tx_desc->cmd_type_offset_bsz = 1957 tx_desc->cmd_type_offset_bsz =
1950 build_ctob(td_cmd, td_offset, 1958 build_ctob(td_cmd, td_offset,
1951 I40E_MAX_DATA_PER_TXD, td_tag); 1959 max_data, td_tag);
1952 1960
1953 tx_desc++; 1961 tx_desc++;
1954 i++; 1962 i++;
@@ -1959,9 +1967,10 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
1959 i = 0; 1967 i = 0;
1960 } 1968 }
1961 1969
1962 dma += I40E_MAX_DATA_PER_TXD; 1970 dma += max_data;
1963 size -= I40E_MAX_DATA_PER_TXD; 1971 size -= max_data;
1964 1972
1973 max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
1965 tx_desc->buffer_addr = cpu_to_le64(dma); 1974 tx_desc->buffer_addr = cpu_to_le64(dma);
1966 } 1975 }
1967 1976
@@ -2110,7 +2119,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
2110 if (i40e_chk_linearize(skb, count)) { 2119 if (i40e_chk_linearize(skb, count)) {
2111 if (__skb_linearize(skb)) 2120 if (__skb_linearize(skb))
2112 goto out_drop; 2121 goto out_drop;
2113 count = TXD_USE_COUNT(skb->len); 2122 count = i40e_txd_use_count(skb->len);
2114 tx_ring->tx_stats.tx_linearize++; 2123 tx_ring->tx_stats.tx_linearize++;
2115 } 2124 }
2116 2125
@@ -2141,7 +2150,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
2141 else if (protocol == htons(ETH_P_IPV6)) 2150 else if (protocol == htons(ETH_P_IPV6))
2142 tx_flags |= I40E_TX_FLAGS_IPV6; 2151 tx_flags |= I40E_TX_FLAGS_IPV6;
2143 2152
2144 tso = i40e_tso(tx_ring, skb, &hdr_len, &cd_type_cmd_tso_mss); 2153 tso = i40e_tso(skb, &hdr_len, &cd_type_cmd_tso_mss);
2145 2154
2146 if (tso < 0) 2155 if (tso < 0)
2147 goto out_drop; 2156 goto out_drop;
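
Note on the max_data change in the hunk above: the expression -dma & (I40E_MAX_READ_REQ_SIZE - 1) is the usual trick for computing how many bytes remain until the next 4K boundary, so the first chunk of an oversized buffer is stretched to end exactly on a read-request boundary. A minimal user-space sketch of that arithmetic, with MAX_READ_REQ and MAX_ALIGNED standing in for the driver constants (nothing here touches real descriptors):

#include <stdint.h>
#include <stdio.h>

#define MAX_READ_REQ	4096u	/* stand-in for I40E_MAX_READ_REQ_SIZE */
#define MAX_ALIGNED	12288u	/* stand-in for I40E_MAX_DATA_PER_TXD_ALIGNED */

/* bytes from addr up to the next MAX_READ_REQ boundary */
static uint32_t pad_to_boundary(uint64_t addr)
{
	return (uint32_t)(-addr & (MAX_READ_REQ - 1));
}

int main(void)
{
	uint64_t dma = 0x1234f00;	/* arbitrary example DMA address */
	uint32_t first_chunk = MAX_ALIGNED + pad_to_boundary(dma);

	/* the first chunk grows so that dma + first_chunk is 4K aligned */
	printf("pad = %u, first chunk = %u, end = 0x%llx\n",
	       pad_to_boundary(dma), first_chunk,
	       (unsigned long long)(dma + first_chunk));
	return 0;
}
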
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index c1dd8c5c9666..84c28aa64fdf 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -146,10 +146,39 @@ enum i40e_dyn_idx_t {
146 146
147#define I40E_MAX_BUFFER_TXD 8 147#define I40E_MAX_BUFFER_TXD 8
148#define I40E_MIN_TX_LEN 17 148#define I40E_MIN_TX_LEN 17
149#define I40E_MAX_DATA_PER_TXD 8192 149
150/* The size limit for a transmit buffer in a descriptor is (16K - 1).
151 * In order to align with the read requests we will align the value to
152 * the nearest 4K which represents our maximum read request size.
153 */
154#define I40E_MAX_READ_REQ_SIZE 4096
155#define I40E_MAX_DATA_PER_TXD (16 * 1024 - 1)
156#define I40E_MAX_DATA_PER_TXD_ALIGNED \
157 (I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))
158
159/* This ugly bit of math is equivalent to DIV_ROUND_UP(size, X) where X is
160 * the value I40E_MAX_DATA_PER_TXD_ALIGNED. It is needed due to the fact
161 * that 12K is not a power of 2 and division is expensive. It is used to
162 * approximate the number of descriptors used per linear buffer. Note
163 * that this will overestimate in some cases as it doesn't account for the
164 * fact that we will add up to 4K - 1 in aligning the 12K buffer; however,
165 * the error should not impact things much, as large buffers usually mean
166 * we will use fewer descriptors than there are frags in an skb.
167 */
168static inline unsigned int i40e_txd_use_count(unsigned int size)
169{
170 const unsigned int max = I40E_MAX_DATA_PER_TXD_ALIGNED;
171 const unsigned int reciprocal = ((1ull << 32) - 1 + (max / 2)) / max;
172 unsigned int adjust = ~(u32)0;
173
174 /* if we rounded up on the reciprocal pull down the adjustment */
175 if ((max * reciprocal) > adjust)
176 adjust = ~(u32)(reciprocal - 1);
177
178 return (u32)((((u64)size * reciprocal) + adjust) >> 32);
179}
150 180
151/* Tx Descriptors needed, worst case */ 181/* Tx Descriptors needed, worst case */
152#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
153#define DESC_NEEDED (MAX_SKB_FRAGS + 4) 182#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
154#define I40E_MIN_DESC_PENDING 4 183#define I40E_MIN_DESC_PENDING 4
155 184
@@ -359,7 +388,7 @@ static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
359 int count = 0, size = skb_headlen(skb); 388 int count = 0, size = skb_headlen(skb);
360 389
361 for (;;) { 390 for (;;) {
362 count += TXD_USE_COUNT(size); 391 count += i40e_txd_use_count(size);
363 392
364 if (!nr_frags--) 393 if (!nr_frags--)
365 break; 394 break;
@@ -401,4 +430,14 @@ static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
401 430
402 return __i40evf_chk_linearize(skb); 431 return __i40evf_chk_linearize(skb);
403} 432}
433
434/**
435 * i40e_rx_is_fcoe - returns true if the Rx packet type is FCoE
436 * @ptype: the packet type field from Rx descriptor write-back
437 **/
438static inline bool i40e_rx_is_fcoe(u16 ptype)
439{
440 return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
441 (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
442}
404#endif /* _I40E_TXRX_H_ */ 443#endif /* _I40E_TXRX_H_ */
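
To see why the reciprocal multiply in i40e_txd_use_count() above is safe, it can be checked against plain round-up division over the buffer sizes a Tx frame can actually reach. The sketch below is a stand-alone user-space copy of the same math; MAX_ALIGNED plays the role of I40E_MAX_DATA_PER_TXD_ALIGNED, and the 64 KiB loop bound is an arbitrary test range, not something the header defines:

#include <stdint.h>
#include <stdio.h>

#define MAX_ALIGNED 12288u	/* stand-in for I40E_MAX_DATA_PER_TXD_ALIGNED */

/* same 32.32 fixed-point reciprocal trick as i40e_txd_use_count() */
static unsigned int txd_use_count(unsigned int size)
{
	const unsigned int max = MAX_ALIGNED;
	const unsigned int reciprocal = ((1ull << 32) - 1 + (max / 2)) / max;
	unsigned int adjust = ~(uint32_t)0;

	/* if we rounded up on the reciprocal pull down the adjustment */
	if ((max * reciprocal) > adjust)
		adjust = ~(uint32_t)(reciprocal - 1);

	return (uint32_t)((((uint64_t)size * reciprocal) + adjust) >> 32);
}

int main(void)
{
	unsigned int size, mismatches = 0;

	for (size = 1; size <= 64 * 1024; size++)
		if (txd_use_count(size) != (size + MAX_ALIGNED - 1) / MAX_ALIGNED)
			mismatches++;

	printf("mismatches in 1..64K: %u\n", mismatches);	/* prints 0 */
	return 0;
}
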
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 301fe2b6dd03..4a78c18e0b7b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -522,6 +522,7 @@ struct i40e_hw {
522 enum i40e_nvmupd_state nvmupd_state; 522 enum i40e_nvmupd_state nvmupd_state;
523 struct i40e_aq_desc nvm_wb_desc; 523 struct i40e_aq_desc nvm_wb_desc;
524 struct i40e_virt_mem nvm_buff; 524 struct i40e_virt_mem nvm_buff;
525 bool nvm_release_on_done;
525 526
526 /* HMC info */ 527 /* HMC info */
527 struct i40e_hmc_info hmc; /* HMC info struct */ 528 struct i40e_hmc_info hmc; /* HMC info struct */
@@ -1329,4 +1330,46 @@ enum i40e_reset_type {
1329 1330
1330/* RSS Hash Table Size */ 1331/* RSS Hash Table Size */
1331#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000 1332#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000
1333
1334/* INPUT SET MASK for RSS, flow director and flexible payload */
1335#define I40E_FD_INSET_L3_SRC_SHIFT 47
1336#define I40E_FD_INSET_L3_SRC_WORD_MASK (0x3ULL << \
1337 I40E_FD_INSET_L3_SRC_SHIFT)
1338#define I40E_FD_INSET_L3_DST_SHIFT 35
1339#define I40E_FD_INSET_L3_DST_WORD_MASK (0x3ULL << \
1340 I40E_FD_INSET_L3_DST_SHIFT)
1341#define I40E_FD_INSET_L4_SRC_SHIFT 34
1342#define I40E_FD_INSET_L4_SRC_WORD_MASK (0x1ULL << \
1343 I40E_FD_INSET_L4_SRC_SHIFT)
1344#define I40E_FD_INSET_L4_DST_SHIFT 33
1345#define I40E_FD_INSET_L4_DST_WORD_MASK (0x1ULL << \
1346 I40E_FD_INSET_L4_DST_SHIFT)
1347#define I40E_FD_INSET_VERIFY_TAG_SHIFT 31
1348#define I40E_FD_INSET_VERIFY_TAG_WORD_MASK (0x3ULL << \
1349 I40E_FD_INSET_VERIFY_TAG_SHIFT)
1350
1351#define I40E_FD_INSET_FLEX_WORD50_SHIFT 17
1352#define I40E_FD_INSET_FLEX_WORD50_MASK (0x1ULL << \
1353 I40E_FD_INSET_FLEX_WORD50_SHIFT)
1354#define I40E_FD_INSET_FLEX_WORD51_SHIFT 16
1355#define I40E_FD_INSET_FLEX_WORD51_MASK (0x1ULL << \
1356 I40E_FD_INSET_FLEX_WORD51_SHIFT)
1357#define I40E_FD_INSET_FLEX_WORD52_SHIFT 15
1358#define I40E_FD_INSET_FLEX_WORD52_MASK (0x1ULL << \
1359 I40E_FD_INSET_FLEX_WORD52_SHIFT)
1360#define I40E_FD_INSET_FLEX_WORD53_SHIFT 14
1361#define I40E_FD_INSET_FLEX_WORD53_MASK (0x1ULL << \
1362 I40E_FD_INSET_FLEX_WORD53_SHIFT)
1363#define I40E_FD_INSET_FLEX_WORD54_SHIFT 13
1364#define I40E_FD_INSET_FLEX_WORD54_MASK (0x1ULL << \
1365 I40E_FD_INSET_FLEX_WORD54_SHIFT)
1366#define I40E_FD_INSET_FLEX_WORD55_SHIFT 12
1367#define I40E_FD_INSET_FLEX_WORD55_MASK (0x1ULL << \
1368 I40E_FD_INSET_FLEX_WORD55_SHIFT)
1369#define I40E_FD_INSET_FLEX_WORD56_SHIFT 11
1370#define I40E_FD_INSET_FLEX_WORD56_MASK (0x1ULL << \
1371 I40E_FD_INSET_FLEX_WORD56_SHIFT)
1372#define I40E_FD_INSET_FLEX_WORD57_SHIFT 10
1373#define I40E_FD_INSET_FLEX_WORD57_MASK (0x1ULL << \
1374 I40E_FD_INSET_FLEX_WORD57_SHIFT)
1332#endif /* _I40E_TYPE_H_ */ 1375#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
index 3b9d2037456c..f04ce6cb70dc 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -80,7 +80,12 @@ enum i40e_virtchnl_ops {
80 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, 80 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
81 I40E_VIRTCHNL_OP_GET_STATS = 15, 81 I40E_VIRTCHNL_OP_GET_STATS = 15,
82 I40E_VIRTCHNL_OP_FCOE = 16, 82 I40E_VIRTCHNL_OP_FCOE = 16,
83 I40E_VIRTCHNL_OP_EVENT = 17, 83 I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
84 I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
85 I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
86 I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
87 I40E_VIRTCHNL_OP_SET_RSS_HENA = 26,
88
84}; 89};
85 90
86/* Virtual channel message descriptor. This overlays the admin queue 91/* Virtual channel message descriptor. This overlays the admin queue
@@ -154,6 +159,7 @@ struct i40e_virtchnl_vsi_resource {
154#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 159#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
155#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 160#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
156#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000 161#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
162#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0x00080000
157 163
158struct i40e_virtchnl_vf_resource { 164struct i40e_virtchnl_vf_resource {
159 u16 num_vsis; 165 u16 num_vsis;
@@ -162,8 +168,8 @@ struct i40e_virtchnl_vf_resource {
162 u16 max_mtu; 168 u16 max_mtu;
163 169
164 u32 vf_offload_flags; 170 u32 vf_offload_flags;
165 u32 max_fcoe_contexts; 171 u32 rss_key_size;
166 u32 max_fcoe_filters; 172 u32 rss_lut_size;
167 173
168 struct i40e_virtchnl_vsi_resource vsi_res[1]; 174 struct i40e_virtchnl_vsi_resource vsi_res[1];
169}; 175};
@@ -322,6 +328,39 @@ struct i40e_virtchnl_promisc_info {
322 * PF replies with struct i40e_eth_stats in an external buffer. 328 * PF replies with struct i40e_eth_stats in an external buffer.
323 */ 329 */
324 330
331/* I40E_VIRTCHNL_OP_CONFIG_RSS_KEY
332 * I40E_VIRTCHNL_OP_CONFIG_RSS_LUT
333 * VF sends these messages to configure RSS. Only supported if both PF
334 * and VF drivers set the I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
335 * configuration negotiation. If this is the case, then the RSS fields in
336 * the VF resource struct are valid.
337 * Both the key and LUT are initialized to 0 by the PF, meaning that
338 * RSS is effectively disabled until set up by the VF.
339 */
340struct i40e_virtchnl_rss_key {
341 u16 vsi_id;
342 u16 key_len;
343 u8 key[1]; /* RSS hash key, packed bytes */
344};
345
346struct i40e_virtchnl_rss_lut {
347 u16 vsi_id;
348 u16 lut_entries;
349 u8 lut[1]; /* RSS lookup table */
350};
351
352/* I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS
353 * I40E_VIRTCHNL_OP_SET_RSS_HENA
354 * VF sends these messages to get and set the hash filter enable bits for RSS.
355 * By default, the PF sets these to all possible traffic types that the
356 * hardware supports. The VF can query this value if it wants to change the
357 * traffic types that are hashed by the hardware.
358 * Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h
359 */
360struct i40e_virtchnl_rss_hena {
361 u64 hena;
362};
363
325/* I40E_VIRTCHNL_OP_EVENT 364/* I40E_VIRTCHNL_OP_EVENT
326 * PF sends this message to inform the VF driver of events that may affect it. 365 * PF sends this message to inform the VF driver of events that may affect it.
327 * No direct response is expected from the VF, though it may generate other 366 * No direct response is expected from the VF, though it may generate other
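
The new RSS virtchnl structures above end in a one-element array, so the actual message a VF sends has to be allocated at sizeof(struct) + real_length - 1 with the key or LUT bytes copied in behind the fixed fields. A user-space sketch of that sizing, using a local mirror of the layout; the 52-byte key length is only an example here, since the real length comes from the rss_key_size field the PF reports:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* local illustration of the i40e_virtchnl_rss_key wire layout */
struct rss_key_msg {
	uint16_t vsi_id;
	uint16_t key_len;
	uint8_t  key[1];	/* packed key bytes follow */
};

int main(void)
{
	uint8_t key[52] = { 0x6d };	/* example key; length is illustrative */
	size_t len = sizeof(struct rss_key_msg) + sizeof(key) - 1;
	struct rss_key_msg *msg = calloc(1, len);

	if (!msg)
		return 1;
	msg->vsi_id = 1;		/* example VSI id */
	msg->key_len = sizeof(key);
	memcpy(msg->key, key, sizeof(key));

	/* this buffer would be the payload of I40E_VIRTCHNL_OP_CONFIG_RSS_KEY */
	printf("message is %zu bytes for a %zu-byte key\n", len, sizeof(key));
	free(msg);
	return 0;
}
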
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 4b70aae2fa84..9110319a8f00 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -37,8 +37,8 @@ static const char i40evf_driver_string[] =
37#define DRV_KERN "-k" 37#define DRV_KERN "-k"
38 38
39#define DRV_VERSION_MAJOR 1 39#define DRV_VERSION_MAJOR 1
40#define DRV_VERSION_MINOR 4 40#define DRV_VERSION_MINOR 5
41#define DRV_VERSION_BUILD 15 41#define DRV_VERSION_BUILD 5
42#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ 42#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
43 __stringify(DRV_VERSION_MINOR) "." \ 43 __stringify(DRV_VERSION_MINOR) "." \
44 __stringify(DRV_VERSION_BUILD) \ 44 __stringify(DRV_VERSION_BUILD) \
@@ -1341,7 +1341,7 @@ static int i40evf_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
1341 } 1341 }
1342 1342
1343 if (lut) { 1343 if (lut) {
1344 ret = i40evf_aq_get_rss_lut(hw, vsi->id, seed, lut, lut_size); 1344 ret = i40evf_aq_get_rss_lut(hw, vsi->id, false, lut, lut_size);
1345 if (ret) { 1345 if (ret) {
1346 dev_err(&adapter->pdev->dev, 1346 dev_err(&adapter->pdev->dev,
1347 "Cannot get RSS lut, err %s aq_err %s\n", 1347 "Cannot get RSS lut, err %s aq_err %s\n",
@@ -1507,7 +1507,7 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
1507 adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector), 1507 adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
1508 GFP_KERNEL); 1508 GFP_KERNEL);
1509 if (!adapter->q_vectors) 1509 if (!adapter->q_vectors)
1510 goto err_out; 1510 return -ENOMEM;
1511 1511
1512 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 1512 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1513 q_vector = &adapter->q_vectors[q_idx]; 1513 q_vector = &adapter->q_vectors[q_idx];
@@ -1519,15 +1519,6 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
1519 } 1519 }
1520 1520
1521 return 0; 1521 return 0;
1522
1523err_out:
1524 while (q_idx) {
1525 q_idx--;
1526 q_vector = &adapter->q_vectors[q_idx];
1527 netif_napi_del(&q_vector->napi);
1528 }
1529 kfree(adapter->q_vectors);
1530 return -ENOMEM;
1531} 1522}
1532 1523
1533/** 1524/**
@@ -2003,6 +1994,8 @@ static void i40evf_adminq_task(struct work_struct *work)
2003 1994
2004 /* check for error indications */ 1995 /* check for error indications */
2005 val = rd32(hw, hw->aq.arq.len); 1996 val = rd32(hw, hw->aq.arq.len);
1997 if (val == 0xdeadbeef) /* indicates device in reset */
1998 goto freedom;
2006 oldval = val; 1999 oldval = val;
2007 if (val & I40E_VF_ARQLEN1_ARQVFE_MASK) { 2000 if (val & I40E_VF_ARQLEN1_ARQVFE_MASK) {
2008 dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n"); 2001 dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
@@ -2259,6 +2252,28 @@ static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
2259 return 0; 2252 return 0;
2260} 2253}
2261 2254
2255#define I40EVF_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_TX |\
2256 NETIF_F_HW_VLAN_CTAG_RX |\
2257 NETIF_F_HW_VLAN_CTAG_FILTER)
2258
2259/**
2260 * i40evf_fix_features - fix up the netdev feature bits
2261 * @netdev: our net device
2262 * @features: desired feature bits
2263 *
2264 * Returns the fixed-up feature bits
2265 **/
2266static netdev_features_t i40evf_fix_features(struct net_device *netdev,
2267 netdev_features_t features)
2268{
2269 struct i40evf_adapter *adapter = netdev_priv(netdev);
2270
2271 features &= ~I40EVF_VLAN_FEATURES;
2272 if (adapter->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN)
2273 features |= I40EVF_VLAN_FEATURES;
2274 return features;
2275}
2276
2262static const struct net_device_ops i40evf_netdev_ops = { 2277static const struct net_device_ops i40evf_netdev_ops = {
2263 .ndo_open = i40evf_open, 2278 .ndo_open = i40evf_open,
2264 .ndo_stop = i40evf_close, 2279 .ndo_stop = i40evf_close,
@@ -2271,6 +2286,7 @@ static const struct net_device_ops i40evf_netdev_ops = {
2271 .ndo_tx_timeout = i40evf_tx_timeout, 2286 .ndo_tx_timeout = i40evf_tx_timeout,
2272 .ndo_vlan_rx_add_vid = i40evf_vlan_rx_add_vid, 2287 .ndo_vlan_rx_add_vid = i40evf_vlan_rx_add_vid,
2273 .ndo_vlan_rx_kill_vid = i40evf_vlan_rx_kill_vid, 2288 .ndo_vlan_rx_kill_vid = i40evf_vlan_rx_kill_vid,
2289 .ndo_fix_features = i40evf_fix_features,
2274#ifdef CONFIG_NET_POLL_CONTROLLER 2290#ifdef CONFIG_NET_POLL_CONTROLLER
2275 .ndo_poll_controller = i40evf_netpoll, 2291 .ndo_poll_controller = i40evf_netpoll,
2276#endif 2292#endif
@@ -2307,29 +2323,20 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw)
2307 **/ 2323 **/
2308int i40evf_process_config(struct i40evf_adapter *adapter) 2324int i40evf_process_config(struct i40evf_adapter *adapter)
2309{ 2325{
2326 struct i40e_virtchnl_vf_resource *vfres = adapter->vf_res;
2310 struct net_device *netdev = adapter->netdev; 2327 struct net_device *netdev = adapter->netdev;
2311 int i; 2328 int i;
2312 2329
2313 /* got VF config message back from PF, now we can parse it */ 2330 /* got VF config message back from PF, now we can parse it */
2314 for (i = 0; i < adapter->vf_res->num_vsis; i++) { 2331 for (i = 0; i < vfres->num_vsis; i++) {
2315 if (adapter->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV) 2332 if (vfres->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
2316 adapter->vsi_res = &adapter->vf_res->vsi_res[i]; 2333 adapter->vsi_res = &vfres->vsi_res[i];
2317 } 2334 }
2318 if (!adapter->vsi_res) { 2335 if (!adapter->vsi_res) {
2319 dev_err(&adapter->pdev->dev, "No LAN VSI found\n"); 2336 dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
2320 return -ENODEV; 2337 return -ENODEV;
2321 } 2338 }
2322 2339
2323 if (adapter->vf_res->vf_offload_flags
2324 & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) {
2325 netdev->vlan_features = netdev->features &
2326 ~(NETIF_F_HW_VLAN_CTAG_TX |
2327 NETIF_F_HW_VLAN_CTAG_RX |
2328 NETIF_F_HW_VLAN_CTAG_FILTER);
2329 netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
2330 NETIF_F_HW_VLAN_CTAG_RX |
2331 NETIF_F_HW_VLAN_CTAG_FILTER;
2332 }
2333 netdev->features |= NETIF_F_HIGHDMA | 2340 netdev->features |= NETIF_F_HIGHDMA |
2334 NETIF_F_SG | 2341 NETIF_F_SG |
2335 NETIF_F_IP_CSUM | 2342 NETIF_F_IP_CSUM |
@@ -2338,7 +2345,7 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
2338 NETIF_F_TSO | 2345 NETIF_F_TSO |
2339 NETIF_F_TSO6 | 2346 NETIF_F_TSO6 |
2340 NETIF_F_TSO_ECN | 2347 NETIF_F_TSO_ECN |
2341 NETIF_F_GSO_GRE | 2348 NETIF_F_GSO_GRE |
2342 NETIF_F_GSO_UDP_TUNNEL | 2349 NETIF_F_GSO_UDP_TUNNEL |
2343 NETIF_F_RXCSUM | 2350 NETIF_F_RXCSUM |
2344 NETIF_F_GRO; 2351 NETIF_F_GRO;
@@ -2355,9 +2362,15 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
2355 if (adapter->flags & I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE) 2362 if (adapter->flags & I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE)
2356 netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; 2363 netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
2357 2364
2365 /* always clear VLAN features because they can change at every reset */
2366 netdev->features &= ~(I40EVF_VLAN_FEATURES);
2358 /* copy netdev features into list of user selectable features */ 2367 /* copy netdev features into list of user selectable features */
2359 netdev->hw_features |= netdev->features; 2368 netdev->hw_features |= netdev->features;
2360 netdev->hw_features &= ~NETIF_F_RXCSUM; 2369
2370 if (vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) {
2371 netdev->vlan_features = netdev->features;
2372 netdev->features |= I40EVF_VLAN_FEATURES;
2373 }
2361 2374
2362 adapter->vsi.id = adapter->vsi_res->vsi_id; 2375 adapter->vsi.id = adapter->vsi_res->vsi_id;
2363 2376
@@ -2838,11 +2851,11 @@ static void i40evf_remove(struct pci_dev *pdev)
2838 adapter->state = __I40EVF_REMOVE; 2851 adapter->state = __I40EVF_REMOVE;
2839 adapter->aq_required = 0; 2852 adapter->aq_required = 0;
2840 i40evf_request_reset(adapter); 2853 i40evf_request_reset(adapter);
2841 msleep(20); 2854 msleep(50);
2842 /* If the FW isn't responding, kick it once, but only once. */ 2855 /* If the FW isn't responding, kick it once, but only once. */
2843 if (!i40evf_asq_done(hw)) { 2856 if (!i40evf_asq_done(hw)) {
2844 i40evf_request_reset(adapter); 2857 i40evf_request_reset(adapter);
2845 msleep(20); 2858 msleep(50);
2846 } 2859 }
2847 2860
2848 if (adapter->msix_entries) { 2861 if (adapter->msix_entries) {
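
The i40evf_fix_features() hook added above runs every time the stack recomputes netdev features, which is what keeps VLAN offloads consistent with whatever the PF granted at the last reset: drop the whole VLAN group, then restore it only when the capability flag is present. A stand-alone sketch of the same pattern; the bit values and the granted flag below are made up purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define F_VLAN_TX	(1ull << 0)
#define F_VLAN_RX	(1ull << 1)
#define F_VLAN_FILTER	(1ull << 2)
#define F_VLAN_ALL	(F_VLAN_TX | F_VLAN_RX | F_VLAN_FILTER)

/* mirrors the fix_features pattern: clear the group, re-add it only if granted */
static uint64_t fix_features(uint64_t requested, int vlan_granted)
{
	requested &= ~F_VLAN_ALL;
	if (vlan_granted)
		requested |= F_VLAN_ALL;
	return requested;
}

int main(void)
{
	printf("granted: 0x%llx\n",
	       (unsigned long long)fix_features(F_VLAN_TX, 1));	/* 0x7 */
	printf("revoked: 0x%llx\n",
	       (unsigned long long)fix_features(F_VLAN_ALL, 0));	/* 0x0 */
	return 0;
}
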
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 7982243d1f9b..bb4d6cdcd0b8 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2831,7 +2831,8 @@ static int igb_get_module_eeprom(struct net_device *netdev,
2831 2831
2832 /* Read EEPROM block, SFF-8079/SFF-8472, word at a time */ 2832 /* Read EEPROM block, SFF-8079/SFF-8472, word at a time */
2833 for (i = 0; i < last_word - first_word + 1; i++) { 2833 for (i = 0; i < last_word - first_word + 1; i++) {
2834 status = igb_read_phy_reg_i2c(hw, first_word + i, &dataword[i]); 2834 status = igb_read_phy_reg_i2c(hw, (first_word + i) * 2,
2835 &dataword[i]);
2835 if (status) { 2836 if (status) {
2836 /* Error occurred while reading module */ 2837 /* Error occurred while reading module */
2837 kfree(dataword); 2838 kfree(dataword);
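
The igb_ethtool.c fix above exists because the loop indexes the SFF-8079/8472 map in 16-bit words while igb_read_phy_reg_i2c() is handed a byte offset, so word i has to be read at byte offset 2 * i; using the word index directly pulled data from the wrong location in the map. The offset math, shown as plain arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int first_word = 0x30;	/* example word index into the module EEPROM */
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("word 0x%02x -> i2c byte offset 0x%02x\n",
		       first_word + i, (first_word + i) * 2);
	return 0;
}
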
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 55a1405cb2a1..8e96c35307fb 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -50,6 +50,7 @@
50#include <linux/aer.h> 50#include <linux/aer.h>
51#include <linux/prefetch.h> 51#include <linux/prefetch.h>
52#include <linux/pm_runtime.h> 52#include <linux/pm_runtime.h>
53#include <linux/etherdevice.h>
53#ifdef CONFIG_IGB_DCA 54#ifdef CONFIG_IGB_DCA
54#include <linux/dca.h> 55#include <linux/dca.h>
55#endif 56#endif
@@ -150,7 +151,7 @@ static void igb_update_dca(struct igb_q_vector *);
150static void igb_setup_dca(struct igb_adapter *); 151static void igb_setup_dca(struct igb_adapter *);
151#endif /* CONFIG_IGB_DCA */ 152#endif /* CONFIG_IGB_DCA */
152static int igb_poll(struct napi_struct *, int); 153static int igb_poll(struct napi_struct *, int);
153static bool igb_clean_tx_irq(struct igb_q_vector *); 154static bool igb_clean_tx_irq(struct igb_q_vector *, int);
154static int igb_clean_rx_irq(struct igb_q_vector *, int); 155static int igb_clean_rx_irq(struct igb_q_vector *, int);
155static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); 156static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
156static void igb_tx_timeout(struct net_device *); 157static void igb_tx_timeout(struct net_device *);
@@ -2442,9 +2443,11 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2442 break; 2443 break;
2443 } 2444 }
2444 2445
2445 /* copy the MAC address out of the NVM */ 2446 if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
2446 if (hw->mac.ops.read_mac_addr(hw)) 2447 /* copy the MAC address out of the NVM */
2447 dev_err(&pdev->dev, "NVM Read Error\n"); 2448 if (hw->mac.ops.read_mac_addr(hw))
2449 dev_err(&pdev->dev, "NVM Read Error\n");
2450 }
2448 2451
2449 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); 2452 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
2450 2453
@@ -6522,13 +6525,14 @@ static int igb_poll(struct napi_struct *napi, int budget)
6522 igb_update_dca(q_vector); 6525 igb_update_dca(q_vector);
6523#endif 6526#endif
6524 if (q_vector->tx.ring) 6527 if (q_vector->tx.ring)
6525 clean_complete = igb_clean_tx_irq(q_vector); 6528 clean_complete = igb_clean_tx_irq(q_vector, budget);
6526 6529
6527 if (q_vector->rx.ring) { 6530 if (q_vector->rx.ring) {
6528 int cleaned = igb_clean_rx_irq(q_vector, budget); 6531 int cleaned = igb_clean_rx_irq(q_vector, budget);
6529 6532
6530 work_done += cleaned; 6533 work_done += cleaned;
6531 clean_complete &= (cleaned < budget); 6534 if (cleaned >= budget)
6535 clean_complete = false;
6532 } 6536 }
6533 6537
6534 /* If all work not completed, return budget and keep polling */ 6538 /* If all work not completed, return budget and keep polling */
@@ -6545,10 +6549,11 @@ static int igb_poll(struct napi_struct *napi, int budget)
6545/** 6549/**
6546 * igb_clean_tx_irq - Reclaim resources after transmit completes 6550 * igb_clean_tx_irq - Reclaim resources after transmit completes
6547 * @q_vector: pointer to q_vector containing needed info 6551 * @q_vector: pointer to q_vector containing needed info
6552 * @napi_budget: Used to determine if we are in netpoll
6548 * 6553 *
6549 * returns true if ring is completely cleaned 6554 * returns true if ring is completely cleaned
6550 **/ 6555 **/
6551static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) 6556static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
6552{ 6557{
6553 struct igb_adapter *adapter = q_vector->adapter; 6558 struct igb_adapter *adapter = q_vector->adapter;
6554 struct igb_ring *tx_ring = q_vector->tx.ring; 6559 struct igb_ring *tx_ring = q_vector->tx.ring;
@@ -6587,7 +6592,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
6587 total_packets += tx_buffer->gso_segs; 6592 total_packets += tx_buffer->gso_segs;
6588 6593
6589 /* free the skb */ 6594 /* free the skb */
6590 dev_consume_skb_any(tx_buffer->skb); 6595 napi_consume_skb(tx_buffer->skb, napi_budget);
6591 6596
6592 /* unmap skb header data */ 6597 /* unmap skb header data */
6593 dma_unmap_single(tx_ring->dev, 6598 dma_unmap_single(tx_ring->dev,
@@ -7574,7 +7579,6 @@ static int igb_resume(struct device *dev)
7574 7579
7575 if (igb_init_interrupt_scheme(adapter, true)) { 7580 if (igb_init_interrupt_scheme(adapter, true)) {
7576 dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); 7581 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
7577 rtnl_unlock();
7578 return -ENOMEM; 7582 return -ENOMEM;
7579 } 7583 }
7580 7584
@@ -7845,11 +7849,13 @@ static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
7845 struct e1000_hw *hw = &adapter->hw; 7849 struct e1000_hw *hw = &adapter->hw;
7846 u32 rar_low, rar_high; 7850 u32 rar_low, rar_high;
7847 7851
7848 /* HW expects these in little endian so we reverse the byte order 7852 /* HW expects these to be in network order when they are plugged
7849 * from network order (big endian) to CPU endian 7853 * into the registers, which are little endian. To guarantee that
7854 * ordering we do an leXX_to_cpup here so the value is ready for
7855 * the byteswap that occurs with writel
7850 */ 7856 */
7851 rar_low = le32_to_cpup((__be32 *)(addr)); 7857 rar_low = le32_to_cpup((__le32 *)(addr));
7852 rar_high = le16_to_cpup((__be16 *)(addr + 4)); 7858 rar_high = le16_to_cpup((__le16 *)(addr + 4));
7853 7859
7854 /* Indicate to hardware the Address is Valid. */ 7860 /* Indicate to hardware the Address is Valid. */
7855 rar_high |= E1000_RAH_AV; 7861 rar_high |= E1000_RAH_AV;
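
The reworded comment above is easier to follow with a concrete value: the six MAC bytes are loaded as little-endian 32- and 16-bit quantities, so when writel() later stores the register little-endian the bytes land back in wire order. A user-space sketch with hand-rolled little-endian loads standing in for le32_to_cpup()/le16_to_cpup(); the MAC address is arbitrary:

#include <stdint.h>
#include <stdio.h>

/* little-endian loads standing in for le32_to_cpup()/le16_to_cpup() */
static uint32_t le32_load(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static uint16_t le16_load(const uint8_t *p)
{
	return (uint16_t)(p[0] | p[1] << 8);
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
	uint32_t rar_low  = le32_load(mac);
	uint32_t rar_high = le16_load(mac + 4);

	/* a little-endian register store of rar_low puts mac[0] back in byte 0 */
	printf("RAL = 0x%08x, RAH address bits = 0x%04x\n", rar_low, rar_high);
	return 0;
}
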
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 84fa28ceb200..d10ed62993c1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 Copyright(c) 1999 - 2016 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -456,7 +456,7 @@ static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
456 IXGBE_QV_STATE_POLL); 456 IXGBE_QV_STATE_POLL);
457#ifdef BP_EXTENDED_STATS 457#ifdef BP_EXTENDED_STATS
458 if (rc != IXGBE_QV_STATE_IDLE) 458 if (rc != IXGBE_QV_STATE_IDLE)
459 q_vector->tx.ring->stats.yields++; 459 q_vector->rx.ring->stats.yields++;
460#endif 460#endif
461 return rc == IXGBE_QV_STATE_IDLE; 461 return rc == IXGBE_QV_STATE_IDLE;
462} 462}
@@ -661,9 +661,7 @@ struct ixgbe_adapter {
661#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9) 661#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9)
662#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 10) 662#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 10)
663#define IXGBE_FLAG2_PHY_INTERRUPT (u32)(1 << 11) 663#define IXGBE_FLAG2_PHY_INTERRUPT (u32)(1 << 11)
664#ifdef CONFIG_IXGBE_VXLAN
665#define IXGBE_FLAG2_VXLAN_REREG_NEEDED BIT(12) 664#define IXGBE_FLAG2_VXLAN_REREG_NEEDED BIT(12)
666#endif
667#define IXGBE_FLAG2_VLAN_PROMISC BIT(13) 665#define IXGBE_FLAG2_VLAN_PROMISC BIT(13)
668 666
669 /* Tx fast path data */ 667 /* Tx fast path data */
@@ -675,6 +673,9 @@ struct ixgbe_adapter {
675 int num_rx_queues; 673 int num_rx_queues;
676 u16 rx_itr_setting; 674 u16 rx_itr_setting;
677 675
676 /* Port number used to identify VXLAN traffic */
677 __be16 vxlan_port;
678
678 /* TX */ 679 /* TX */
679 struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp; 680 struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
680 681
@@ -782,9 +783,6 @@ struct ixgbe_adapter {
782 u32 timer_event_accumulator; 783 u32 timer_event_accumulator;
783 u32 vferr_refcount; 784 u32 vferr_refcount;
784 struct ixgbe_mac_addr *mac_table; 785 struct ixgbe_mac_addr *mac_table;
785#ifdef CONFIG_IXGBE_VXLAN
786 u16 vxlan_port;
787#endif
788 struct kobject *info_kobj; 786 struct kobject *info_kobj;
789#ifdef CONFIG_IXGBE_HWMON 787#ifdef CONFIG_IXGBE_HWMON
790 struct hwmon_buff *ixgbe_hwmon_buff; 788 struct hwmon_buff *ixgbe_hwmon_buff;
@@ -819,6 +817,7 @@ static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
819 return IXGBE_MAX_RSS_INDICES; 817 return IXGBE_MAX_RSS_INDICES;
820 case ixgbe_mac_X550: 818 case ixgbe_mac_X550:
821 case ixgbe_mac_X550EM_x: 819 case ixgbe_mac_X550EM_x:
820 case ixgbe_mac_x550em_a:
822 return IXGBE_MAX_RSS_INDICES_X550; 821 return IXGBE_MAX_RSS_INDICES_X550;
823 default: 822 default:
824 return 0; 823 return 0;
@@ -862,13 +861,15 @@ enum ixgbe_boards {
862 board_X540, 861 board_X540,
863 board_X550, 862 board_X550,
864 board_X550EM_x, 863 board_X550EM_x,
864 board_x550em_a,
865}; 865};
866 866
867extern struct ixgbe_info ixgbe_82598_info; 867extern const struct ixgbe_info ixgbe_82598_info;
868extern struct ixgbe_info ixgbe_82599_info; 868extern const struct ixgbe_info ixgbe_82599_info;
869extern struct ixgbe_info ixgbe_X540_info; 869extern const struct ixgbe_info ixgbe_X540_info;
870extern struct ixgbe_info ixgbe_X550_info; 870extern const struct ixgbe_info ixgbe_X550_info;
871extern struct ixgbe_info ixgbe_X550EM_x_info; 871extern const struct ixgbe_info ixgbe_X550EM_x_info;
872extern const struct ixgbe_info ixgbe_x550em_a_info;
872#ifdef CONFIG_IXGBE_DCB 873#ifdef CONFIG_IXGBE_DCB
873extern const struct dcbnl_rtnl_ops dcbnl_ops; 874extern const struct dcbnl_rtnl_ops dcbnl_ops;
874#endif 875#endif
@@ -879,6 +880,8 @@ extern const char ixgbe_driver_version[];
879extern char ixgbe_default_device_descr[]; 880extern char ixgbe_default_device_descr[];
880#endif /* IXGBE_FCOE */ 881#endif /* IXGBE_FCOE */
881 882
883int ixgbe_open(struct net_device *netdev);
884int ixgbe_close(struct net_device *netdev);
882void ixgbe_up(struct ixgbe_adapter *adapter); 885void ixgbe_up(struct ixgbe_adapter *adapter);
883void ixgbe_down(struct ixgbe_adapter *adapter); 886void ixgbe_down(struct ixgbe_adapter *adapter);
884void ixgbe_reinit_locked(struct ixgbe_adapter *adapter); 887void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index d8a9fb8a59e2..6ecd598c6ef5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2015 Intel Corporation. 4 Copyright(c) 1999 - 2016 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -1160,7 +1160,7 @@ static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
1160 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB); 1160 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
1161} 1161}
1162 1162
1163static struct ixgbe_mac_operations mac_ops_82598 = { 1163static const struct ixgbe_mac_operations mac_ops_82598 = {
1164 .init_hw = &ixgbe_init_hw_generic, 1164 .init_hw = &ixgbe_init_hw_generic,
1165 .reset_hw = &ixgbe_reset_hw_82598, 1165 .reset_hw = &ixgbe_reset_hw_82598,
1166 .start_hw = &ixgbe_start_hw_82598, 1166 .start_hw = &ixgbe_start_hw_82598,
@@ -1192,9 +1192,11 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
1192 .clear_vfta = &ixgbe_clear_vfta_82598, 1192 .clear_vfta = &ixgbe_clear_vfta_82598,
1193 .set_vfta = &ixgbe_set_vfta_82598, 1193 .set_vfta = &ixgbe_set_vfta_82598,
1194 .fc_enable = &ixgbe_fc_enable_82598, 1194 .fc_enable = &ixgbe_fc_enable_82598,
1195 .setup_fc = ixgbe_setup_fc_generic,
1195 .set_fw_drv_ver = NULL, 1196 .set_fw_drv_ver = NULL,
1196 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync, 1197 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync,
1197 .release_swfw_sync = &ixgbe_release_swfw_sync, 1198 .release_swfw_sync = &ixgbe_release_swfw_sync,
1199 .init_swfw_sync = NULL,
1198 .get_thermal_sensor_data = NULL, 1200 .get_thermal_sensor_data = NULL,
1199 .init_thermal_sensor_thresh = NULL, 1201 .init_thermal_sensor_thresh = NULL,
1200 .prot_autoc_read = &prot_autoc_read_generic, 1202 .prot_autoc_read = &prot_autoc_read_generic,
@@ -1203,7 +1205,7 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
1203 .disable_rx = &ixgbe_disable_rx_generic, 1205 .disable_rx = &ixgbe_disable_rx_generic,
1204}; 1206};
1205 1207
1206static struct ixgbe_eeprom_operations eeprom_ops_82598 = { 1208static const struct ixgbe_eeprom_operations eeprom_ops_82598 = {
1207 .init_params = &ixgbe_init_eeprom_params_generic, 1209 .init_params = &ixgbe_init_eeprom_params_generic,
1208 .read = &ixgbe_read_eerd_generic, 1210 .read = &ixgbe_read_eerd_generic,
1209 .write = &ixgbe_write_eeprom_generic, 1211 .write = &ixgbe_write_eeprom_generic,
@@ -1214,7 +1216,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
1214 .update_checksum = &ixgbe_update_eeprom_checksum_generic, 1216 .update_checksum = &ixgbe_update_eeprom_checksum_generic,
1215}; 1217};
1216 1218
1217static struct ixgbe_phy_operations phy_ops_82598 = { 1219static const struct ixgbe_phy_operations phy_ops_82598 = {
1218 .identify = &ixgbe_identify_phy_generic, 1220 .identify = &ixgbe_identify_phy_generic,
1219 .identify_sfp = &ixgbe_identify_module_generic, 1221 .identify_sfp = &ixgbe_identify_module_generic,
1220 .init = &ixgbe_init_phy_ops_82598, 1222 .init = &ixgbe_init_phy_ops_82598,
@@ -1230,7 +1232,7 @@ static struct ixgbe_phy_operations phy_ops_82598 = {
1230 .check_overtemp = &ixgbe_tn_check_overtemp, 1232 .check_overtemp = &ixgbe_tn_check_overtemp,
1231}; 1233};
1232 1234
1233struct ixgbe_info ixgbe_82598_info = { 1235const struct ixgbe_info ixgbe_82598_info = {
1234 .mac = ixgbe_mac_82598EB, 1236 .mac = ixgbe_mac_82598EB,
1235 .get_invariants = &ixgbe_get_invariants_82598, 1237 .get_invariants = &ixgbe_get_invariants_82598,
1236 .mac_ops = &mac_ops_82598, 1238 .mac_ops = &mac_ops_82598,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index fa8d4f40ac2a..01519787324a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2015 Intel Corporation. 4 Copyright(c) 1999 - 2016 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -1633,6 +1633,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
1633 switch (hw->mac.type) { 1633 switch (hw->mac.type) {
1634 case ixgbe_mac_X550: 1634 case ixgbe_mac_X550:
1635 case ixgbe_mac_X550EM_x: 1635 case ixgbe_mac_X550EM_x:
1636 case ixgbe_mac_x550em_a:
1636 IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm); 1637 IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
1637 break; 1638 break;
1638 default: 1639 default:
@@ -2181,7 +2182,7 @@ release_i2c_access:
2181 return status; 2182 return status;
2182} 2183}
2183 2184
2184static struct ixgbe_mac_operations mac_ops_82599 = { 2185static const struct ixgbe_mac_operations mac_ops_82599 = {
2185 .init_hw = &ixgbe_init_hw_generic, 2186 .init_hw = &ixgbe_init_hw_generic,
2186 .reset_hw = &ixgbe_reset_hw_82599, 2187 .reset_hw = &ixgbe_reset_hw_82599,
2187 .start_hw = &ixgbe_start_hw_82599, 2188 .start_hw = &ixgbe_start_hw_82599,
@@ -2220,6 +2221,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
2220 .clear_vfta = &ixgbe_clear_vfta_generic, 2221 .clear_vfta = &ixgbe_clear_vfta_generic,
2221 .set_vfta = &ixgbe_set_vfta_generic, 2222 .set_vfta = &ixgbe_set_vfta_generic,
2222 .fc_enable = &ixgbe_fc_enable_generic, 2223 .fc_enable = &ixgbe_fc_enable_generic,
2224 .setup_fc = ixgbe_setup_fc_generic,
2223 .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, 2225 .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic,
2224 .init_uta_tables = &ixgbe_init_uta_tables_generic, 2226 .init_uta_tables = &ixgbe_init_uta_tables_generic,
2225 .setup_sfp = &ixgbe_setup_sfp_modules_82599, 2227 .setup_sfp = &ixgbe_setup_sfp_modules_82599,
@@ -2227,6 +2229,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
2227 .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, 2229 .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
2228 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync, 2230 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync,
2229 .release_swfw_sync = &ixgbe_release_swfw_sync, 2231 .release_swfw_sync = &ixgbe_release_swfw_sync,
2232 .init_swfw_sync = NULL,
2230 .get_thermal_sensor_data = &ixgbe_get_thermal_sensor_data_generic, 2233 .get_thermal_sensor_data = &ixgbe_get_thermal_sensor_data_generic,
2231 .init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic, 2234 .init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic,
2232 .prot_autoc_read = &prot_autoc_read_82599, 2235 .prot_autoc_read = &prot_autoc_read_82599,
@@ -2235,7 +2238,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
2235 .disable_rx = &ixgbe_disable_rx_generic, 2238 .disable_rx = &ixgbe_disable_rx_generic,
2236}; 2239};
2237 2240
2238static struct ixgbe_eeprom_operations eeprom_ops_82599 = { 2241static const struct ixgbe_eeprom_operations eeprom_ops_82599 = {
2239 .init_params = &ixgbe_init_eeprom_params_generic, 2242 .init_params = &ixgbe_init_eeprom_params_generic,
2240 .read = &ixgbe_read_eeprom_82599, 2243 .read = &ixgbe_read_eeprom_82599,
2241 .read_buffer = &ixgbe_read_eeprom_buffer_82599, 2244 .read_buffer = &ixgbe_read_eeprom_buffer_82599,
@@ -2246,7 +2249,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
2246 .update_checksum = &ixgbe_update_eeprom_checksum_generic, 2249 .update_checksum = &ixgbe_update_eeprom_checksum_generic,
2247}; 2250};
2248 2251
2249static struct ixgbe_phy_operations phy_ops_82599 = { 2252static const struct ixgbe_phy_operations phy_ops_82599 = {
2250 .identify = &ixgbe_identify_phy_82599, 2253 .identify = &ixgbe_identify_phy_82599,
2251 .identify_sfp = &ixgbe_identify_module_generic, 2254 .identify_sfp = &ixgbe_identify_module_generic,
2252 .init = &ixgbe_init_phy_ops_82599, 2255 .init = &ixgbe_init_phy_ops_82599,
@@ -2263,7 +2266,7 @@ static struct ixgbe_phy_operations phy_ops_82599 = {
2263 .check_overtemp = &ixgbe_tn_check_overtemp, 2266 .check_overtemp = &ixgbe_tn_check_overtemp,
2264}; 2267};
2265 2268
2266struct ixgbe_info ixgbe_82599_info = { 2269const struct ixgbe_info ixgbe_82599_info = {
2267 .mac = ixgbe_mac_82599EB, 2270 .mac = ixgbe_mac_82599EB,
2268 .get_invariants = &ixgbe_get_invariants_82599, 2271 .get_invariants = &ixgbe_get_invariants_82599,
2269 .mac_ops = &mac_ops_82599, 2272 .mac_ops = &mac_ops_82599,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 64045053e874..737443a015d5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2015 Intel Corporation. 4 Copyright(c) 1999 - 2016 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -97,6 +97,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
97 case IXGBE_DEV_ID_X540T: 97 case IXGBE_DEV_ID_X540T:
98 case IXGBE_DEV_ID_X540T1: 98 case IXGBE_DEV_ID_X540T1:
99 case IXGBE_DEV_ID_X550T: 99 case IXGBE_DEV_ID_X550T:
100 case IXGBE_DEV_ID_X550T1:
100 case IXGBE_DEV_ID_X550EM_X_10G_T: 101 case IXGBE_DEV_ID_X550EM_X_10G_T:
101 supported = true; 102 supported = true;
102 break; 103 break;
@@ -111,12 +112,12 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
111} 112}
112 113
113/** 114/**
114 * ixgbe_setup_fc - Set up flow control 115 * ixgbe_setup_fc_generic - Set up flow control
115 * @hw: pointer to hardware structure 116 * @hw: pointer to hardware structure
116 * 117 *
117 * Called at init time to set up flow control. 118 * Called at init time to set up flow control.
118 **/ 119 **/
119static s32 ixgbe_setup_fc(struct ixgbe_hw *hw) 120s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
120{ 121{
121 s32 ret_val = 0; 122 s32 ret_val = 0;
122 u32 reg = 0, reg_bp = 0; 123 u32 reg = 0, reg_bp = 0;
@@ -296,7 +297,7 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
296 IXGBE_WRITE_FLUSH(hw); 297 IXGBE_WRITE_FLUSH(hw);
297 298
298 /* Setup flow control */ 299 /* Setup flow control */
299 ret_val = ixgbe_setup_fc(hw); 300 ret_val = hw->mac.ops.setup_fc(hw);
300 if (ret_val) 301 if (ret_val)
301 return ret_val; 302 return ret_val;
302 303
@@ -681,6 +682,7 @@ s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
681void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw) 682void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
682{ 683{
683 struct ixgbe_bus_info *bus = &hw->bus; 684 struct ixgbe_bus_info *bus = &hw->bus;
685 u16 ee_ctrl_4;
684 u32 reg; 686 u32 reg;
685 687
686 reg = IXGBE_READ_REG(hw, IXGBE_STATUS); 688 reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
@@ -691,6 +693,13 @@ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
691 reg = IXGBE_READ_REG(hw, IXGBE_FACTPS(hw)); 693 reg = IXGBE_READ_REG(hw, IXGBE_FACTPS(hw));
692 if (reg & IXGBE_FACTPS_LFS) 694 if (reg & IXGBE_FACTPS_LFS)
693 bus->func ^= 0x1; 695 bus->func ^= 0x1;
696
697 /* Get MAC instance from EEPROM for configuring CS4227 */
698 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) {
699 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4);
700 bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >>
701 IXGBE_EE_CTRL_4_INST_ID_SHIFT;
702 }
694} 703}
695 704
696/** 705/**
@@ -2854,6 +2863,7 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
2854 case ixgbe_mac_X540: 2863 case ixgbe_mac_X540:
2855 case ixgbe_mac_X550: 2864 case ixgbe_mac_X550:
2856 case ixgbe_mac_X550EM_x: 2865 case ixgbe_mac_X550EM_x:
2866 case ixgbe_mac_x550em_a:
2857 pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS; 2867 pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
2858 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599; 2868 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
2859 break; 2869 break;
@@ -3483,18 +3493,27 @@ static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
3483 * Communicates with the manageability block. On success return 0 3493 * Communicates with the manageability block. On success return 0
3484 * else return IXGBE_ERR_HOST_INTERFACE_COMMAND. 3494 * else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
3485 **/ 3495 **/
3486s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, 3496s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
3487 u32 length, u32 timeout, 3497 u32 length, u32 timeout,
3488 bool return_data) 3498 bool return_data)
3489{ 3499{
3490 u32 hicr, i, bi, fwsts;
3491 u32 hdr_size = sizeof(struct ixgbe_hic_hdr); 3500 u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
3501 u32 hicr, i, bi, fwsts;
3492 u16 buf_len, dword_len; 3502 u16 buf_len, dword_len;
3503 union {
3504 struct ixgbe_hic_hdr hdr;
3505 u32 u32arr[1];
3506 } *bp = buffer;
3507 s32 status;
3493 3508
3494 if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { 3509 if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
3495 hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length); 3510 hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length);
3496 return IXGBE_ERR_HOST_INTERFACE_COMMAND; 3511 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
3497 } 3512 }
3513 /* Take management host interface semaphore */
3514 status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
3515 if (status)
3516 return status;
3498 3517
3499 /* Set bit 9 of FWSTS clearing FW reset indication */ 3518 /* Set bit 9 of FWSTS clearing FW reset indication */
3500 fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS); 3519 fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
@@ -3502,26 +3521,27 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
3502 3521
3503 /* Check that the host interface is enabled. */ 3522 /* Check that the host interface is enabled. */
3504 hicr = IXGBE_READ_REG(hw, IXGBE_HICR); 3523 hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
3505 if ((hicr & IXGBE_HICR_EN) == 0) { 3524 if (!(hicr & IXGBE_HICR_EN)) {
3506 hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n"); 3525 hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n");
3507 return IXGBE_ERR_HOST_INTERFACE_COMMAND; 3526 status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
3527 goto rel_out;
3508 } 3528 }
3509 3529
3510 /* Calculate length in DWORDs. We must be DWORD aligned */ 3530 /* Calculate length in DWORDs. We must be DWORD aligned */
3511 if ((length % (sizeof(u32))) != 0) { 3531 if (length % sizeof(u32)) {
3512 hw_dbg(hw, "Buffer length failure, not aligned to dword"); 3532 hw_dbg(hw, "Buffer length failure, not aligned to dword");
3513 return IXGBE_ERR_INVALID_ARGUMENT; 3533 status = IXGBE_ERR_INVALID_ARGUMENT;
3534 goto rel_out;
3514 } 3535 }
3515 3536
3516 dword_len = length >> 2; 3537 dword_len = length >> 2;
3517 3538
3518 /* 3539 /* The device driver writes the relevant command block
3519 * The device driver writes the relevant command block
3520 * into the ram area. 3540 * into the ram area.
3521 */ 3541 */
3522 for (i = 0; i < dword_len; i++) 3542 for (i = 0; i < dword_len; i++)
3523 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG, 3543 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
3524 i, cpu_to_le32(buffer[i])); 3544 i, cpu_to_le32(bp->u32arr[i]));
3525 3545
3526 /* Setting this bit tells the ARC that a new command is pending. */ 3546 /* Setting this bit tells the ARC that a new command is pending. */
3527 IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C); 3547 IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
@@ -3534,44 +3554,49 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
3534 } 3554 }
3535 3555
3536 /* Check command successful completion. */ 3556 /* Check command successful completion. */
3537 if ((timeout != 0 && i == timeout) || 3557 if ((timeout && i == timeout) ||
3538 (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) { 3558 !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
3539 hw_dbg(hw, "Command has failed with no status valid.\n"); 3559 hw_dbg(hw, "Command has failed with no status valid.\n");
3540 return IXGBE_ERR_HOST_INTERFACE_COMMAND; 3560 status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
3561 goto rel_out;
3541 } 3562 }
3542 3563
3543 if (!return_data) 3564 if (!return_data)
3544 return 0; 3565 goto rel_out;
3545 3566
3546 /* Calculate length in DWORDs */ 3567 /* Calculate length in DWORDs */
3547 dword_len = hdr_size >> 2; 3568 dword_len = hdr_size >> 2;
3548 3569
3549 /* first pull in the header so we know the buffer length */ 3570 /* first pull in the header so we know the buffer length */
3550 for (bi = 0; bi < dword_len; bi++) { 3571 for (bi = 0; bi < dword_len; bi++) {
3551 buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); 3572 bp->u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
3552 le32_to_cpus(&buffer[bi]); 3573 le32_to_cpus(&bp->u32arr[bi]);
3553 } 3574 }
3554 3575
3555 /* If there is any thing in data position pull it in */ 3576 /* If there is any thing in data position pull it in */
3556 buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len; 3577 buf_len = bp->hdr.buf_len;
3557 if (buf_len == 0) 3578 if (!buf_len)
3558 return 0; 3579 goto rel_out;
3559 3580
3560 if (length < (buf_len + hdr_size)) { 3581 if (length < round_up(buf_len, 4) + hdr_size) {
3561 hw_dbg(hw, "Buffer not large enough for reply message.\n"); 3582 hw_dbg(hw, "Buffer not large enough for reply message.\n");
3562 return IXGBE_ERR_HOST_INTERFACE_COMMAND; 3583 status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
3584 goto rel_out;
3563 } 3585 }
3564 3586
3565 /* Calculate length in DWORDs, add 3 for odd lengths */ 3587 /* Calculate length in DWORDs, add 3 for odd lengths */
3566 dword_len = (buf_len + 3) >> 2; 3588 dword_len = (buf_len + 3) >> 2;
3567 3589
3568 /* Pull in the rest of the buffer (bi is where we left off)*/ 3590 /* Pull in the rest of the buffer (bi is where we left off) */
3569 for (; bi <= dword_len; bi++) { 3591 for (; bi <= dword_len; bi++) {
3570 buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); 3592 bp->u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
3571 le32_to_cpus(&buffer[bi]); 3593 le32_to_cpus(&bp->u32arr[bi]);
3572 } 3594 }
3573 3595
3574 return 0; 3596rel_out:
3597 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
3598
3599 return status;
3575} 3600}
3576 3601
3577/** 3602/**
@@ -3594,13 +3619,10 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
3594 int i; 3619 int i;
3595 s32 ret_val; 3620 s32 ret_val;
3596 3621
3597 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM))
3598 return IXGBE_ERR_SWFW_SYNC;
3599
3600 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; 3622 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
3601 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN; 3623 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
3602 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; 3624 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
3603 fw_cmd.port_num = (u8)hw->bus.func; 3625 fw_cmd.port_num = hw->bus.func;
3604 fw_cmd.ver_maj = maj; 3626 fw_cmd.ver_maj = maj;
3605 fw_cmd.ver_min = min; 3627 fw_cmd.ver_min = min;
3606 fw_cmd.ver_build = build; 3628 fw_cmd.ver_build = build;
@@ -3612,7 +3634,7 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
3612 fw_cmd.pad2 = 0; 3634 fw_cmd.pad2 = 0;
3613 3635
3614 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { 3636 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
3615 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, 3637 ret_val = ixgbe_host_interface_command(hw, &fw_cmd,
3616 sizeof(fw_cmd), 3638 sizeof(fw_cmd),
3617 IXGBE_HI_COMMAND_TIMEOUT, 3639 IXGBE_HI_COMMAND_TIMEOUT,
3618 true); 3640 true);
@@ -3628,7 +3650,6 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
3628 break; 3650 break;
3629 } 3651 }
3630 3652
3631 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
3632 return ret_val; 3653 return ret_val;
3633} 3654}
3634 3655
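
The rewrite of ixgbe_host_interface_command() above replaces the bare u32 pointer with a void pointer overlaid by a header/dword-array union, so the same buffer can be pushed to and pulled from the FLEX_MNG registers a dword at a time and then be read back as a struct ixgbe_hic_hdr without casting. A small user-space sketch of that overlay; the header layout is a simplified stand-in, and the example assumes a little-endian host, whereas the real code handles byte order with le32_to_cpus():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* simplified stand-in for struct ixgbe_hic_hdr */
struct hic_hdr {
	uint8_t cmd;
	uint8_t buf_len;
	uint8_t cmd_or_resp;
	uint8_t checksum;
};

int main(void)
{
	/* pretend these dwords were read back register-by-register */
	uint32_t raw[2] = { 0x00000a33, 0x00000000 };

	union {
		struct hic_hdr hdr;
		uint32_t u32arr[2];
	} msg;

	memcpy(msg.u32arr, raw, sizeof(raw));

	/* same bytes, now viewed as the header; buf_len sizes the payload pull */
	printf("cmd 0x%02x, buf_len %u\n", msg.hdr.cmd, msg.hdr.buf_len);
	return 0;
}
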
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 2b9563137fd8..6f8e6a56e242 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2014 Intel Corporation. 4 Copyright(c) 1999 - 2016 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -81,6 +81,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw);
81s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw); 81s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw);
82s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval); 82s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
83s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw); 83s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
84s32 ixgbe_setup_fc_generic(struct ixgbe_hw *);
84bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw); 85bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
85void ixgbe_fc_autoneg(struct ixgbe_hw *hw); 86void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
86 87
@@ -110,8 +111,8 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
110s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps); 111s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
111s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, 112s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
112 u8 build, u8 ver); 113 u8 build, u8 ver);
113s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, 114s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *, u32 length,
114 u32 length, u32 timeout, bool return_data); 115 u32 timeout, bool return_data);
115void ixgbe_clear_tx_pending(struct ixgbe_hw *hw); 116void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
116bool ixgbe_mng_present(struct ixgbe_hw *hw); 117bool ixgbe_mng_present(struct ixgbe_hw *hw);
117bool ixgbe_mng_enabled(struct ixgbe_hw *hw); 118bool ixgbe_mng_enabled(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index 02c7333a9c83..f8fb2acc2632 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2014 Intel Corporation. 4 Copyright(c) 1999 - 2016 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -293,6 +293,7 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
293 case ixgbe_mac_X540: 293 case ixgbe_mac_X540:
294 case ixgbe_mac_X550: 294 case ixgbe_mac_X550:
295 case ixgbe_mac_X550EM_x: 295 case ixgbe_mac_X550EM_x:
296 case ixgbe_mac_x550em_a:
296 return ixgbe_dcb_hw_config_82599(hw, pfc_en, refill, max, 297 return ixgbe_dcb_hw_config_82599(hw, pfc_en, refill, max,
297 bwgid, ptype, prio_tc); 298 bwgid, ptype, prio_tc);
298 default: 299 default:
@@ -311,6 +312,7 @@ s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
311 case ixgbe_mac_X540: 312 case ixgbe_mac_X540:
312 case ixgbe_mac_X550: 313 case ixgbe_mac_X550:
313 case ixgbe_mac_X550EM_x: 314 case ixgbe_mac_X550EM_x:
315 case ixgbe_mac_x550em_a:
314 return ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc); 316 return ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc);
315 default: 317 default:
316 break; 318 break;
@@ -368,6 +370,7 @@ s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
368 case ixgbe_mac_X540: 370 case ixgbe_mac_X540:
369 case ixgbe_mac_X550: 371 case ixgbe_mac_X550:
370 case ixgbe_mac_X550EM_x: 372 case ixgbe_mac_X550EM_x:
373 case ixgbe_mac_x550em_a:
371 ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, 374 ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max,
372 bwg_id, prio_type, prio_tc); 375 bwg_id, prio_type, prio_tc);
373 ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, 376 ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
@@ -398,6 +401,7 @@ void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map)
398 case ixgbe_mac_X540: 401 case ixgbe_mac_X540:
399 case ixgbe_mac_X550: 402 case ixgbe_mac_X550:
400 case ixgbe_mac_X550EM_x: 403 case ixgbe_mac_X550EM_x:
404 case ixgbe_mac_x550em_a:
401 ixgbe_dcb_read_rtrup2tc_82599(hw, map); 405 ixgbe_dcb_read_rtrup2tc_82599(hw, map);
402 break; 406 break;
403 default: 407 default:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 726e0eeee63b..9f76be1431b1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2014 Intel Corporation. 4 Copyright(c) 1999 - 2016 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -547,6 +547,7 @@ static void ixgbe_get_regs(struct net_device *netdev,
547 case ixgbe_mac_X540: 547 case ixgbe_mac_X540:
548 case ixgbe_mac_X550: 548 case ixgbe_mac_X550:
549 case ixgbe_mac_X550EM_x: 549 case ixgbe_mac_X550EM_x:
550 case ixgbe_mac_x550em_a:
550 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i)); 551 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
551 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); 552 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
552 break; 553 break;
@@ -660,6 +661,7 @@ static void ixgbe_get_regs(struct net_device *netdev,
660 case ixgbe_mac_X540: 661 case ixgbe_mac_X540:
661 case ixgbe_mac_X550: 662 case ixgbe_mac_X550:
662 case ixgbe_mac_X550EM_x: 663 case ixgbe_mac_X550EM_x:
664 case ixgbe_mac_x550em_a:
663 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS); 665 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
664 regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS); 666 regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
665 for (i = 0; i < 8; i++) 667 for (i = 0; i < 8; i++)
@@ -1443,6 +1445,7 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
1443 case ixgbe_mac_X540: 1445 case ixgbe_mac_X540:
1444 case ixgbe_mac_X550: 1446 case ixgbe_mac_X550:
1445 case ixgbe_mac_X550EM_x: 1447 case ixgbe_mac_X550EM_x:
1448 case ixgbe_mac_x550em_a:
1446 toggle = 0x7FFFF30F; 1449 toggle = 0x7FFFF30F;
1447 test = reg_test_82599; 1450 test = reg_test_82599;
1448 break; 1451 break;
@@ -1681,6 +1684,7 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
1681 case ixgbe_mac_X540: 1684 case ixgbe_mac_X540:
1682 case ixgbe_mac_X550: 1685 case ixgbe_mac_X550:
1683 case ixgbe_mac_X550EM_x: 1686 case ixgbe_mac_X550EM_x:
1687 case ixgbe_mac_x550em_a:
1684 reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1688 reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1685 reg_ctl &= ~IXGBE_DMATXCTL_TE; 1689 reg_ctl &= ~IXGBE_DMATXCTL_TE;
1686 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl); 1690 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
@@ -1720,6 +1724,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
1720 case ixgbe_mac_X540: 1724 case ixgbe_mac_X540:
1721 case ixgbe_mac_X550: 1725 case ixgbe_mac_X550:
1722 case ixgbe_mac_X550EM_x: 1726 case ixgbe_mac_X550EM_x:
1727 case ixgbe_mac_x550em_a:
1723 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL); 1728 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
1724 reg_data |= IXGBE_DMATXCTL_TE; 1729 reg_data |= IXGBE_DMATXCTL_TE;
1725 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data); 1730 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
@@ -1780,6 +1785,7 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
1780 case ixgbe_mac_X540: 1785 case ixgbe_mac_X540:
1781 case ixgbe_mac_X550: 1786 case ixgbe_mac_X550:
1782 case ixgbe_mac_X550EM_x: 1787 case ixgbe_mac_X550EM_x:
1788 case ixgbe_mac_x550em_a:
1783 reg_data = IXGBE_READ_REG(hw, IXGBE_MACC); 1789 reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
1784 reg_data |= IXGBE_MACC_FLU; 1790 reg_data |= IXGBE_MACC_FLU;
1785 IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data); 1791 IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
@@ -2053,7 +2059,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
2053 2059
2054 if (if_running) 2060 if (if_running)
2055 /* indicate we're in test mode */ 2061 /* indicate we're in test mode */
2056 dev_close(netdev); 2062 ixgbe_close(netdev);
2057 else 2063 else
2058 ixgbe_reset(adapter); 2064 ixgbe_reset(adapter);
2059 2065
@@ -2091,7 +2097,7 @@ skip_loopback:
2091 /* clear testing bit and return adapter to previous state */ 2097 /* clear testing bit and return adapter to previous state */
2092 clear_bit(__IXGBE_TESTING, &adapter->state); 2098 clear_bit(__IXGBE_TESTING, &adapter->state);
2093 if (if_running) 2099 if (if_running)
2094 dev_open(netdev); 2100 ixgbe_open(netdev);
2095 else if (hw->mac.ops.disable_tx_laser) 2101 else if (hw->mac.ops.disable_tx_laser)
2096 hw->mac.ops.disable_tx_laser(hw); 2102 hw->mac.ops.disable_tx_laser(hw);
2097 } else { 2103 } else {
@@ -2991,6 +2997,7 @@ static int ixgbe_get_ts_info(struct net_device *dev,
2991 switch (adapter->hw.mac.type) { 2997 switch (adapter->hw.mac.type) {
2992 case ixgbe_mac_X550: 2998 case ixgbe_mac_X550:
2993 case ixgbe_mac_X550EM_x: 2999 case ixgbe_mac_X550EM_x:
3000 case ixgbe_mac_x550em_a:
2994 case ixgbe_mac_X540: 3001 case ixgbe_mac_X540:
2995 case ixgbe_mac_82599EB: 3002 case ixgbe_mac_82599EB:
2996 info->so_timestamping = 3003 info->so_timestamping =
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index e771e764daa3..bcdc88444ceb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 Copyright(c) 1999 - 2016 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -128,6 +128,7 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
128 case ixgbe_mac_X540: 128 case ixgbe_mac_X540:
129 case ixgbe_mac_X550: 129 case ixgbe_mac_X550:
130 case ixgbe_mac_X550EM_x: 130 case ixgbe_mac_X550EM_x:
131 case ixgbe_mac_x550em_a:
131 if (num_tcs > 4) { 132 if (num_tcs > 4) {
132 /* 133 /*
133 * TCs : TC0/1 TC2/3 TC4-7 134 * TCs : TC0/1 TC2/3 TC4-7
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 569cb0757c93..2976df77bf14 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2015 Intel Corporation. 4 Copyright(c) 1999 - 2016 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -54,15 +54,6 @@
54#include <net/pkt_cls.h> 54#include <net/pkt_cls.h>
55#include <net/tc_act/tc_gact.h> 55#include <net/tc_act/tc_gact.h>
56 56
57#ifdef CONFIG_OF
58#include <linux/of_net.h>
59#endif
60
61#ifdef CONFIG_SPARC
62#include <asm/idprom.h>
63#include <asm/prom.h>
64#endif
65
66#include "ixgbe.h" 57#include "ixgbe.h"
67#include "ixgbe_common.h" 58#include "ixgbe_common.h"
68#include "ixgbe_dcb_82599.h" 59#include "ixgbe_dcb_82599.h"
@@ -79,10 +70,10 @@ char ixgbe_default_device_descr[] =
79static char ixgbe_default_device_descr[] = 70static char ixgbe_default_device_descr[] =
80 "Intel(R) 10 Gigabit Network Connection"; 71 "Intel(R) 10 Gigabit Network Connection";
81#endif 72#endif
82#define DRV_VERSION "4.2.1-k" 73#define DRV_VERSION "4.4.0-k"
83const char ixgbe_driver_version[] = DRV_VERSION; 74const char ixgbe_driver_version[] = DRV_VERSION;
84static const char ixgbe_copyright[] = 75static const char ixgbe_copyright[] =
85 "Copyright (c) 1999-2015 Intel Corporation."; 76 "Copyright (c) 1999-2016 Intel Corporation.";
86 77
87static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has over heated. Restart the computer. If the problem persists, power off the system and replace the adapter"; 78static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has over heated. Restart the computer. If the problem persists, power off the system and replace the adapter";
88 79
@@ -92,6 +83,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = {
92 [board_X540] = &ixgbe_X540_info, 83 [board_X540] = &ixgbe_X540_info,
93 [board_X550] = &ixgbe_X550_info, 84 [board_X550] = &ixgbe_X550_info,
94 [board_X550EM_x] = &ixgbe_X550EM_x_info, 85 [board_X550EM_x] = &ixgbe_X550EM_x_info,
86 [board_x550em_a] = &ixgbe_x550em_a_info,
95}; 87};
96 88
97/* ixgbe_pci_tbl - PCI Device ID Table 89/* ixgbe_pci_tbl - PCI Device ID Table
@@ -134,10 +126,17 @@ static const struct pci_device_id ixgbe_pci_tbl[] = {
134 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 }, 126 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
135 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 }, 127 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
136 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550}, 128 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
129 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550},
137 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x}, 130 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
138 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x}, 131 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
139 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x}, 132 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x},
140 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x}, 133 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x},
134 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a },
135 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a },
136 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a },
137 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), board_x550em_a },
138 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a },
139 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a },
141 /* required last entry */ 140 /* required last entry */
142 {0, } 141 {0, }
143}; 142};
@@ -869,6 +868,7 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
869 case ixgbe_mac_X540: 868 case ixgbe_mac_X540:
870 case ixgbe_mac_X550: 869 case ixgbe_mac_X550:
871 case ixgbe_mac_X550EM_x: 870 case ixgbe_mac_X550EM_x:
871 case ixgbe_mac_x550em_a:
872 if (direction == -1) { 872 if (direction == -1) {
873 /* other causes */ 873 /* other causes */
874 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 874 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
@@ -907,6 +907,7 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
907 case ixgbe_mac_X540: 907 case ixgbe_mac_X540:
908 case ixgbe_mac_X550: 908 case ixgbe_mac_X550:
909 case ixgbe_mac_X550EM_x: 909 case ixgbe_mac_X550EM_x:
910 case ixgbe_mac_x550em_a:
910 mask = (qmask & 0xFFFFFFFF); 911 mask = (qmask & 0xFFFFFFFF);
911 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); 912 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
912 mask = (qmask >> 32); 913 mask = (qmask >> 32);
@@ -1087,9 +1088,40 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
1087} 1088}
1088 1089
1089/** 1090/**
1091 * ixgbe_tx_maxrate - callback to set the maximum per-queue bitrate
1092 **/
1093static int ixgbe_tx_maxrate(struct net_device *netdev,
1094 int queue_index, u32 maxrate)
1095{
1096 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1097 struct ixgbe_hw *hw = &adapter->hw;
1098 u32 bcnrc_val = ixgbe_link_mbps(adapter);
1099
1100 if (!maxrate)
1101 return 0;
1102
1103 /* Calculate the rate factor values to set */
1104 bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
1105 bcnrc_val /= maxrate;
1106
1107 /* clear everything but the rate factor */
1108 bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
1109 IXGBE_RTTBCNRC_RF_DEC_MASK;
1110
1111 /* enable the rate scheduler */
1112 bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;
1113
1114 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_index);
1115 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
1116
1117 return 0;
1118}
1119
1120/**
1090 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes 1121 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
1091 * @q_vector: structure containing interrupt and ring information 1122 * @q_vector: structure containing interrupt and ring information
1092 * @tx_ring: tx ring to clean 1123 * @tx_ring: tx ring to clean
1124 * @napi_budget: Used to determine if we are in netpoll
1093 **/ 1125 **/
1094static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, 1126static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
1095 struct ixgbe_ring *tx_ring, int napi_budget) 1127 struct ixgbe_ring *tx_ring, int napi_budget)
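The new ixgbe_tx_maxrate callback in the hunk above programs the per-queue rate limiter: the link speed in Mb/s is shifted into the rate-factor integer field, divided by the requested cap to form a fixed-point divisor, masked down to the factor bits, and written to RTTBCNRC with the scheduler-enable bit set. A standalone sketch of that arithmetic, using illustrative constants in place of the IXGBE_RTTBCNRC_* definitions from ixgbe_type.h:

#include <stdio.h>
#include <stdint.h>

/* Illustrative values; the real IXGBE_RTTBCNRC_* constants live in ixgbe_type.h. */
#define RF_INT_SHIFT 14u
#define RF_INT_MASK  (0x3FFu << RF_INT_SHIFT)
#define RF_DEC_MASK  0x3FFFu
#define RS_ENA       (1u << 31)

/* Build the RTTBCNRC-style word the way ixgbe_tx_maxrate() does:
 * factor = link_mbps / maxrate_mbps in fixed point, keep only the
 * rate-factor bits, then set the enable bit. */
static uint32_t rate_factor(uint32_t link_mbps, uint32_t maxrate_mbps)
{
    uint32_t bcnrc;

    if (!maxrate_mbps)
        return 0;                       /* no cap requested, nothing to program */

    bcnrc  = link_mbps << RF_INT_SHIFT; /* fixed-point numerator */
    bcnrc /= maxrate_mbps;              /* fixed-point divisor */
    bcnrc &= RF_INT_MASK | RF_DEC_MASK;
    bcnrc |= RS_ENA;                    /* turn the rate scheduler on */
    return bcnrc;
}

int main(void)
{
    /* 10G link capped at 2500 Mb/s yields an integer factor of 4. */
    printf("RTTBCNRC word: 0x%08x\n", rate_factor(10000, 2500));
    return 0;
}

For a 10G link capped at 2500 Mb/s this gives an integer rate factor of 4, i.e. the queue is held to one quarter of line rate; the driver then selects the queue via RTTDQSEL before writing the word.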
@@ -2222,6 +2254,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
2222 case ixgbe_mac_X540: 2254 case ixgbe_mac_X540:
2223 case ixgbe_mac_X550: 2255 case ixgbe_mac_X550:
2224 case ixgbe_mac_X550EM_x: 2256 case ixgbe_mac_X550EM_x:
2257 case ixgbe_mac_x550em_a:
2225 ixgbe_set_ivar(adapter, -1, 1, v_idx); 2258 ixgbe_set_ivar(adapter, -1, 1, v_idx);
2226 break; 2259 break;
2227 default: 2260 default:
@@ -2333,6 +2366,7 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
2333 case ixgbe_mac_X540: 2366 case ixgbe_mac_X540:
2334 case ixgbe_mac_X550: 2367 case ixgbe_mac_X550:
2335 case ixgbe_mac_X550EM_x: 2368 case ixgbe_mac_X550EM_x:
2369 case ixgbe_mac_x550em_a:
2336 /* 2370 /*
2337 * set the WDIS bit to not clear the timer bits and cause an 2371 * set the WDIS bit to not clear the timer bits and cause an
2338 * immediate assertion of the interrupt 2372 * immediate assertion of the interrupt
@@ -2494,6 +2528,7 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
2494 return false; 2528 return false;
2495 case ixgbe_mac_82599EB: 2529 case ixgbe_mac_82599EB:
2496 case ixgbe_mac_X550EM_x: 2530 case ixgbe_mac_X550EM_x:
2531 case ixgbe_mac_x550em_a:
2497 switch (hw->mac.ops.get_media_type(hw)) { 2532 switch (hw->mac.ops.get_media_type(hw)) {
2498 case ixgbe_media_type_fiber: 2533 case ixgbe_media_type_fiber:
2499 case ixgbe_media_type_fiber_qsfp: 2534 case ixgbe_media_type_fiber_qsfp:
@@ -2568,6 +2603,7 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
2568 case ixgbe_mac_X540: 2603 case ixgbe_mac_X540:
2569 case ixgbe_mac_X550: 2604 case ixgbe_mac_X550:
2570 case ixgbe_mac_X550EM_x: 2605 case ixgbe_mac_X550EM_x:
2606 case ixgbe_mac_x550em_a:
2571 mask = (qmask & 0xFFFFFFFF); 2607 mask = (qmask & 0xFFFFFFFF);
2572 if (mask) 2608 if (mask)
2573 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); 2609 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
@@ -2596,6 +2632,7 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
2596 case ixgbe_mac_X540: 2632 case ixgbe_mac_X540:
2597 case ixgbe_mac_X550: 2633 case ixgbe_mac_X550:
2598 case ixgbe_mac_X550EM_x: 2634 case ixgbe_mac_X550EM_x:
2635 case ixgbe_mac_x550em_a:
2599 mask = (qmask & 0xFFFFFFFF); 2636 mask = (qmask & 0xFFFFFFFF);
2600 if (mask) 2637 if (mask)
2601 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask); 2638 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
@@ -2631,6 +2668,7 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
2631 case ixgbe_mac_X540: 2668 case ixgbe_mac_X540:
2632 case ixgbe_mac_X550: 2669 case ixgbe_mac_X550:
2633 case ixgbe_mac_X550EM_x: 2670 case ixgbe_mac_X550EM_x:
2671 case ixgbe_mac_x550em_a:
2634 mask |= IXGBE_EIMS_TS; 2672 mask |= IXGBE_EIMS_TS;
2635 break; 2673 break;
2636 default: 2674 default:
@@ -2646,7 +2684,10 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
2646 case ixgbe_mac_X540: 2684 case ixgbe_mac_X540:
2647 case ixgbe_mac_X550: 2685 case ixgbe_mac_X550:
2648 case ixgbe_mac_X550EM_x: 2686 case ixgbe_mac_X550EM_x:
2649 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP) 2687 case ixgbe_mac_x550em_a:
2688 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
2689 adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
2690 adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP_N)
2650 mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw); 2691 mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw);
2651 if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t) 2692 if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t)
2652 mask |= IXGBE_EICR_GPI_SDP0_X540; 2693 mask |= IXGBE_EICR_GPI_SDP0_X540;
@@ -2704,6 +2745,7 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
2704 case ixgbe_mac_X540: 2745 case ixgbe_mac_X540:
2705 case ixgbe_mac_X550: 2746 case ixgbe_mac_X550:
2706 case ixgbe_mac_X550EM_x: 2747 case ixgbe_mac_X550EM_x:
2748 case ixgbe_mac_x550em_a:
2707 if (hw->phy.type == ixgbe_phy_x550em_ext_t && 2749 if (hw->phy.type == ixgbe_phy_x550em_ext_t &&
2708 (eicr & IXGBE_EICR_GPI_SDP0_X540)) { 2750 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2709 adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT; 2751 adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT;
@@ -2786,8 +2828,10 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
2786 ixgbe_update_dca(q_vector); 2828 ixgbe_update_dca(q_vector);
2787#endif 2829#endif
2788 2830
2789 ixgbe_for_each_ring(ring, q_vector->tx) 2831 ixgbe_for_each_ring(ring, q_vector->tx) {
2790 clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring, budget); 2832 if (!ixgbe_clean_tx_irq(q_vector, ring, budget))
2833 clean_complete = false;
2834 }
2791 2835
2792 /* Exit if we are called by netpoll or busy polling is active */ 2836 /* Exit if we are called by netpoll or busy polling is active */
2793 if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector)) 2837 if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector))
@@ -2805,7 +2849,8 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
2805 per_ring_budget); 2849 per_ring_budget);
2806 2850
2807 work_done += cleaned; 2851 work_done += cleaned;
2808 clean_complete &= (cleaned < per_ring_budget); 2852 if (cleaned >= per_ring_budget)
2853 clean_complete = false;
2809 } 2854 }
2810 2855
2811 ixgbe_qv_unlock_napi(q_vector); 2856 ixgbe_qv_unlock_napi(q_vector);
@@ -2937,6 +2982,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
2937 case ixgbe_mac_X540: 2982 case ixgbe_mac_X540:
2938 case ixgbe_mac_X550: 2983 case ixgbe_mac_X550:
2939 case ixgbe_mac_X550EM_x: 2984 case ixgbe_mac_X550EM_x:
2985 case ixgbe_mac_x550em_a:
2940 if (eicr & IXGBE_EICR_ECC) { 2986 if (eicr & IXGBE_EICR_ECC) {
2941 e_info(link, "Received ECC Err, initiating reset\n"); 2987 e_info(link, "Received ECC Err, initiating reset\n");
2942 adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; 2988 adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
@@ -3033,6 +3079,7 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
3033 case ixgbe_mac_X540: 3079 case ixgbe_mac_X540:
3034 case ixgbe_mac_X550: 3080 case ixgbe_mac_X550:
3035 case ixgbe_mac_X550EM_x: 3081 case ixgbe_mac_X550EM_x:
3082 case ixgbe_mac_x550em_a:
3036 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); 3083 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3037 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); 3084 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3038 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); 3085 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
@@ -3832,6 +3879,7 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
3832 break; 3879 break;
3833 case ixgbe_mac_X550: 3880 case ixgbe_mac_X550:
3834 case ixgbe_mac_X550EM_x: 3881 case ixgbe_mac_X550EM_x:
3882 case ixgbe_mac_x550em_a:
3835 if (adapter->num_vfs) 3883 if (adapter->num_vfs)
3836 rdrxctl |= IXGBE_RDRXCTL_PSP; 3884 rdrxctl |= IXGBE_RDRXCTL_PSP;
3837 /* fall through for older HW */ 3885 /* fall through for older HW */
@@ -3908,7 +3956,9 @@ static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
3908 struct ixgbe_hw *hw = &adapter->hw; 3956 struct ixgbe_hw *hw = &adapter->hw;
3909 3957
3910 /* add VID to filter table */ 3958 /* add VID to filter table */
3911 hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, true); 3959 if (!vid || !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
3960 hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, !!vid);
3961
3912 set_bit(vid, adapter->active_vlans); 3962 set_bit(vid, adapter->active_vlans);
3913 3963
3914 return 0; 3964 return 0;
@@ -3965,9 +4015,7 @@ static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
3965 struct ixgbe_hw *hw = &adapter->hw; 4015 struct ixgbe_hw *hw = &adapter->hw;
3966 4016
3967 /* remove VID from filter table */ 4017 /* remove VID from filter table */
3968 if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC) 4018 if (vid && !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
3969 ixgbe_update_pf_promisc_vlvf(adapter, vid);
3970 else
3971 hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true); 4019 hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true);
3972 4020
3973 clear_bit(vid, adapter->active_vlans); 4021 clear_bit(vid, adapter->active_vlans);
@@ -3995,6 +4043,7 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
3995 case ixgbe_mac_X540: 4043 case ixgbe_mac_X540:
3996 case ixgbe_mac_X550: 4044 case ixgbe_mac_X550:
3997 case ixgbe_mac_X550EM_x: 4045 case ixgbe_mac_X550EM_x:
4046 case ixgbe_mac_x550em_a:
3998 for (i = 0; i < adapter->num_rx_queues; i++) { 4047 for (i = 0; i < adapter->num_rx_queues; i++) {
3999 struct ixgbe_ring *ring = adapter->rx_ring[i]; 4048 struct ixgbe_ring *ring = adapter->rx_ring[i];
4000 4049
@@ -4031,6 +4080,7 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
4031 case ixgbe_mac_X540: 4080 case ixgbe_mac_X540:
4032 case ixgbe_mac_X550: 4081 case ixgbe_mac_X550:
4033 case ixgbe_mac_X550EM_x: 4082 case ixgbe_mac_X550EM_x:
4083 case ixgbe_mac_x550em_a:
4034 for (i = 0; i < adapter->num_rx_queues; i++) { 4084 for (i = 0; i < adapter->num_rx_queues; i++) {
4035 struct ixgbe_ring *ring = adapter->rx_ring[i]; 4085 struct ixgbe_ring *ring = adapter->rx_ring[i];
4036 4086
@@ -4057,6 +4107,7 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
4057 case ixgbe_mac_X540: 4107 case ixgbe_mac_X540:
4058 case ixgbe_mac_X550: 4108 case ixgbe_mac_X550:
4059 case ixgbe_mac_X550EM_x: 4109 case ixgbe_mac_X550EM_x:
4110 case ixgbe_mac_x550em_a:
4060 default: 4111 default:
4061 if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) 4112 if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
4062 break; 4113 break;
@@ -4147,6 +4198,7 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
4147 case ixgbe_mac_X540: 4198 case ixgbe_mac_X540:
4148 case ixgbe_mac_X550: 4199 case ixgbe_mac_X550:
4149 case ixgbe_mac_X550EM_x: 4200 case ixgbe_mac_X550EM_x:
4201 case ixgbe_mac_x550em_a:
4150 default: 4202 default:
4151 if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) 4203 if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
4152 break; 4204 break;
@@ -4172,11 +4224,11 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
4172 4224
4173static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) 4225static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
4174{ 4226{
4175 u16 vid; 4227 u16 vid = 1;
4176 4228
4177 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); 4229 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
4178 4230
4179 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) 4231 for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
4180 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); 4232 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
4181} 4233}
4182 4234
@@ -4426,6 +4478,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
4426 struct ixgbe_adapter *adapter = netdev_priv(netdev); 4478 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4427 struct ixgbe_hw *hw = &adapter->hw; 4479 struct ixgbe_hw *hw = &adapter->hw;
4428 u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE; 4480 u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
4481 netdev_features_t features = netdev->features;
4429 int count; 4482 int count;
4430 4483
4431 /* Check for Promiscuous and All Multicast modes */ 4484 /* Check for Promiscuous and All Multicast modes */
@@ -4443,14 +4496,13 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
4443 hw->addr_ctrl.user_set_promisc = true; 4496 hw->addr_ctrl.user_set_promisc = true;
4444 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 4497 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4445 vmolr |= IXGBE_VMOLR_MPE; 4498 vmolr |= IXGBE_VMOLR_MPE;
4446 ixgbe_vlan_promisc_enable(adapter); 4499 features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4447 } else { 4500 } else {
4448 if (netdev->flags & IFF_ALLMULTI) { 4501 if (netdev->flags & IFF_ALLMULTI) {
4449 fctrl |= IXGBE_FCTRL_MPE; 4502 fctrl |= IXGBE_FCTRL_MPE;
4450 vmolr |= IXGBE_VMOLR_MPE; 4503 vmolr |= IXGBE_VMOLR_MPE;
4451 } 4504 }
4452 hw->addr_ctrl.user_set_promisc = false; 4505 hw->addr_ctrl.user_set_promisc = false;
4453 ixgbe_vlan_promisc_disable(adapter);
4454 } 4506 }
4455 4507
4456 /* 4508 /*
@@ -4483,7 +4535,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
4483 } 4535 }
4484 4536
4485 /* This is useful for sniffing bad packets. */ 4537 /* This is useful for sniffing bad packets. */
4486 if (adapter->netdev->features & NETIF_F_RXALL) { 4538 if (features & NETIF_F_RXALL) {
4487 /* UPE and MPE will be handled by normal PROMISC logic 4539 /* UPE and MPE will be handled by normal PROMISC logic
4488 * in e1000e_set_rx_mode */ 4540 * in e1000e_set_rx_mode */
4489 fctrl |= (IXGBE_FCTRL_SBP | /* Receive bad packets */ 4541 fctrl |= (IXGBE_FCTRL_SBP | /* Receive bad packets */
@@ -4496,10 +4548,15 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
4496 4548
4497 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 4549 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4498 4550
4499 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) 4551 if (features & NETIF_F_HW_VLAN_CTAG_RX)
4500 ixgbe_vlan_strip_enable(adapter); 4552 ixgbe_vlan_strip_enable(adapter);
4501 else 4553 else
4502 ixgbe_vlan_strip_disable(adapter); 4554 ixgbe_vlan_strip_disable(adapter);
4555
4556 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
4557 ixgbe_vlan_promisc_disable(adapter);
4558 else
4559 ixgbe_vlan_promisc_enable(adapter);
4503} 4560}
4504 4561
4505static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) 4562static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
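The ixgbe_set_rx_mode() hunks above snapshot netdev->features into a local copy, clear the VLAN filter feature from that copy while the device is promiscuous, and then derive both VLAN stripping and VLAN filter promiscuity from the one local value. A compact sketch of that feature-gating flow, with stand-in flag names rather than the kernel's NETIF_F_* / IFF_* constants:

#include <stdio.h>

#define F_VLAN_RX     (1u << 0)   /* stand-in for NETIF_F_HW_VLAN_CTAG_RX */
#define F_VLAN_FILTER (1u << 1)   /* stand-in for NETIF_F_HW_VLAN_CTAG_FILTER */
#define IFF_PROMISC   (1u << 2)

/* Mirror of the new flow: take one features snapshot, adjust it for
 * promiscuous mode, then drive strip and filter state from that copy. */
static void set_rx_mode(unsigned int dev_flags, unsigned int dev_features)
{
    unsigned int features = dev_features;

    if (dev_flags & IFF_PROMISC)
        features &= ~F_VLAN_FILTER;   /* promiscuous mode disables VLAN filtering */

    printf("vlan strip : %s\n", (features & F_VLAN_RX) ? "on" : "off");
    printf("vlan filter: %s\n", (features & F_VLAN_FILTER) ? "on" : "off");
}

int main(void)
{
    set_rx_mode(0, F_VLAN_RX | F_VLAN_FILTER);           /* normal: filter on */
    set_rx_mode(IFF_PROMISC, F_VLAN_RX | F_VLAN_FILTER); /* promisc: filter off */
    return 0;
}

This is also why the ixgbe_set_features() hunk further down can drop its direct strip enable/disable calls and simply re-run ixgbe_set_rx_mode() when either VLAN feature bit changes.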
@@ -4530,10 +4587,9 @@ static void ixgbe_clear_vxlan_port(struct ixgbe_adapter *adapter)
4530 switch (adapter->hw.mac.type) { 4587 switch (adapter->hw.mac.type) {
4531 case ixgbe_mac_X550: 4588 case ixgbe_mac_X550:
4532 case ixgbe_mac_X550EM_x: 4589 case ixgbe_mac_X550EM_x:
4590 case ixgbe_mac_x550em_a:
4533 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VXLANCTRL, 0); 4591 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VXLANCTRL, 0);
4534#ifdef CONFIG_IXGBE_VXLAN
4535 adapter->vxlan_port = 0; 4592 adapter->vxlan_port = 0;
4536#endif
4537 break; 4593 break;
4538 default: 4594 default:
4539 break; 4595 break;
@@ -4632,6 +4688,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
4632 case ixgbe_mac_X540: 4688 case ixgbe_mac_X540:
4633 case ixgbe_mac_X550: 4689 case ixgbe_mac_X550:
4634 case ixgbe_mac_X550EM_x: 4690 case ixgbe_mac_X550EM_x:
4691 case ixgbe_mac_x550em_a:
4635 dv_id = IXGBE_DV_X540(link, tc); 4692 dv_id = IXGBE_DV_X540(link, tc);
4636 break; 4693 break;
4637 default: 4694 default:
@@ -4692,6 +4749,7 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
4692 case ixgbe_mac_X540: 4749 case ixgbe_mac_X540:
4693 case ixgbe_mac_X550: 4750 case ixgbe_mac_X550:
4694 case ixgbe_mac_X550EM_x: 4751 case ixgbe_mac_X550EM_x:
4752 case ixgbe_mac_x550em_a:
4695 dv_id = IXGBE_LOW_DV_X540(tc); 4753 dv_id = IXGBE_LOW_DV_X540(tc);
4696 break; 4754 break;
4697 default: 4755 default:
@@ -5108,6 +5166,7 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
5108 case ixgbe_mac_X540: 5166 case ixgbe_mac_X540:
5109 case ixgbe_mac_X550: 5167 case ixgbe_mac_X550:
5110 case ixgbe_mac_X550EM_x: 5168 case ixgbe_mac_X550EM_x:
5169 case ixgbe_mac_x550em_a:
5111 default: 5170 default:
5112 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); 5171 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
5113 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); 5172 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
@@ -5158,6 +5217,7 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
5158 gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X; 5217 gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X;
5159 break; 5218 break;
5160 case ixgbe_mac_X550EM_x: 5219 case ixgbe_mac_X550EM_x:
5220 case ixgbe_mac_x550em_a:
5161 gpie |= IXGBE_SDP0_GPIEN_X540; 5221 gpie |= IXGBE_SDP0_GPIEN_X540;
5162 break; 5222 break;
5163 default: 5223 default:
@@ -5469,6 +5529,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
5469 case ixgbe_mac_X540: 5529 case ixgbe_mac_X540:
5470 case ixgbe_mac_X550: 5530 case ixgbe_mac_X550:
5471 case ixgbe_mac_X550EM_x: 5531 case ixgbe_mac_X550EM_x:
5532 case ixgbe_mac_x550em_a:
5472 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, 5533 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
5473 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & 5534 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
5474 ~IXGBE_DMATXCTL_TE)); 5535 ~IXGBE_DMATXCTL_TE));
@@ -5587,6 +5648,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
5587 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; 5648 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
5588 break; 5649 break;
5589 case ixgbe_mac_X550EM_x: 5650 case ixgbe_mac_X550EM_x:
5651 case ixgbe_mac_x550em_a:
5590 case ixgbe_mac_X550: 5652 case ixgbe_mac_X550:
5591#ifdef CONFIG_IXGBE_DCA 5653#ifdef CONFIG_IXGBE_DCA
5592 adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE; 5654 adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
@@ -5612,6 +5674,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
5612 case ixgbe_mac_X540: 5674 case ixgbe_mac_X540:
5613 case ixgbe_mac_X550: 5675 case ixgbe_mac_X550:
5614 case ixgbe_mac_X550EM_x: 5676 case ixgbe_mac_X550EM_x:
5677 case ixgbe_mac_x550em_a:
5615 adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS; 5678 adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
5616 adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS; 5679 adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
5617 break; 5680 break;
@@ -5994,7 +6057,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
5994 * handler is registered with the OS, the watchdog timer is started, 6057 * handler is registered with the OS, the watchdog timer is started,
5995 * and the stack is notified that the interface is ready. 6058 * and the stack is notified that the interface is ready.
5996 **/ 6059 **/
5997static int ixgbe_open(struct net_device *netdev) 6060int ixgbe_open(struct net_device *netdev)
5998{ 6061{
5999 struct ixgbe_adapter *adapter = netdev_priv(netdev); 6062 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6000 struct ixgbe_hw *hw = &adapter->hw; 6063 struct ixgbe_hw *hw = &adapter->hw;
@@ -6096,7 +6159,7 @@ static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
6096 * needs to be disabled. A global MAC reset is issued to stop the 6159 * needs to be disabled. A global MAC reset is issued to stop the
6097 * hardware, and all transmit and receive resources are freed. 6160 * hardware, and all transmit and receive resources are freed.
6098 **/ 6161 **/
6099static int ixgbe_close(struct net_device *netdev) 6162int ixgbe_close(struct net_device *netdev)
6100{ 6163{
6101 struct ixgbe_adapter *adapter = netdev_priv(netdev); 6164 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6102 6165
@@ -6219,6 +6282,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
6219 case ixgbe_mac_X540: 6282 case ixgbe_mac_X540:
6220 case ixgbe_mac_X550: 6283 case ixgbe_mac_X550:
6221 case ixgbe_mac_X550EM_x: 6284 case ixgbe_mac_X550EM_x:
6285 case ixgbe_mac_x550em_a:
6222 pci_wake_from_d3(pdev, !!wufc); 6286 pci_wake_from_d3(pdev, !!wufc);
6223 break; 6287 break;
6224 default: 6288 default:
@@ -6354,6 +6418,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
6354 case ixgbe_mac_X540: 6418 case ixgbe_mac_X540:
6355 case ixgbe_mac_X550: 6419 case ixgbe_mac_X550:
6356 case ixgbe_mac_X550EM_x: 6420 case ixgbe_mac_X550EM_x:
6421 case ixgbe_mac_x550em_a:
6357 hwstats->pxonrxc[i] += 6422 hwstats->pxonrxc[i] +=
6358 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); 6423 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
6359 break; 6424 break;
@@ -6369,7 +6434,8 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
6369 if ((hw->mac.type == ixgbe_mac_82599EB) || 6434 if ((hw->mac.type == ixgbe_mac_82599EB) ||
6370 (hw->mac.type == ixgbe_mac_X540) || 6435 (hw->mac.type == ixgbe_mac_X540) ||
6371 (hw->mac.type == ixgbe_mac_X550) || 6436 (hw->mac.type == ixgbe_mac_X550) ||
6372 (hw->mac.type == ixgbe_mac_X550EM_x)) { 6437 (hw->mac.type == ixgbe_mac_X550EM_x) ||
6438 (hw->mac.type == ixgbe_mac_x550em_a)) {
6373 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); 6439 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
6374 IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */ 6440 IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */
6375 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); 6441 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
@@ -6394,6 +6460,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
6394 case ixgbe_mac_X540: 6460 case ixgbe_mac_X540:
6395 case ixgbe_mac_X550: 6461 case ixgbe_mac_X550:
6396 case ixgbe_mac_X550EM_x: 6462 case ixgbe_mac_X550EM_x:
6463 case ixgbe_mac_x550em_a:
6397 /* OS2BMC stats are X540 and later */ 6464 /* OS2BMC stats are X540 and later */
6398 hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC); 6465 hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
6399 hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC); 6466 hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
@@ -6664,6 +6731,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
6664 case ixgbe_mac_X540: 6731 case ixgbe_mac_X540:
6665 case ixgbe_mac_X550: 6732 case ixgbe_mac_X550:
6666 case ixgbe_mac_X550EM_x: 6733 case ixgbe_mac_X550EM_x:
6734 case ixgbe_mac_x550em_a:
6667 case ixgbe_mac_82599EB: { 6735 case ixgbe_mac_82599EB: {
6668 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); 6736 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
6669 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG); 6737 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
@@ -7213,103 +7281,61 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
7213 return 1; 7281 return 1;
7214} 7282}
7215 7283
7284static inline bool ixgbe_ipv6_csum_is_sctp(struct sk_buff *skb)
7285{
7286 unsigned int offset = 0;
7287
7288 ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
7289
7290 return offset == skb_checksum_start_offset(skb);
7291}
7292
7216static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, 7293static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
7217 struct ixgbe_tx_buffer *first) 7294 struct ixgbe_tx_buffer *first)
7218{ 7295{
7219 struct sk_buff *skb = first->skb; 7296 struct sk_buff *skb = first->skb;
7220 u32 vlan_macip_lens = 0; 7297 u32 vlan_macip_lens = 0;
7221 u32 mss_l4len_idx = 0;
7222 u32 type_tucmd = 0; 7298 u32 type_tucmd = 0;
7223 7299
7224 if (skb->ip_summed != CHECKSUM_PARTIAL) { 7300 if (skb->ip_summed != CHECKSUM_PARTIAL) {
7225 if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) && 7301csum_failed:
7226 !(first->tx_flags & IXGBE_TX_FLAGS_CC)) 7302 if (!(first->tx_flags & (IXGBE_TX_FLAGS_HW_VLAN |
7303 IXGBE_TX_FLAGS_CC)))
7227 return; 7304 return;
7228 vlan_macip_lens = skb_network_offset(skb) << 7305 goto no_csum;
7229 IXGBE_ADVTXD_MACLEN_SHIFT; 7306 }
7230 } else {
7231 u8 l4_hdr = 0;
7232 union {
7233 struct iphdr *ipv4;
7234 struct ipv6hdr *ipv6;
7235 u8 *raw;
7236 } network_hdr;
7237 union {
7238 struct tcphdr *tcphdr;
7239 u8 *raw;
7240 } transport_hdr;
7241 __be16 frag_off;
7242
7243 if (skb->encapsulation) {
7244 network_hdr.raw = skb_inner_network_header(skb);
7245 transport_hdr.raw = skb_inner_transport_header(skb);
7246 vlan_macip_lens = skb_inner_network_offset(skb) <<
7247 IXGBE_ADVTXD_MACLEN_SHIFT;
7248 } else {
7249 network_hdr.raw = skb_network_header(skb);
7250 transport_hdr.raw = skb_transport_header(skb);
7251 vlan_macip_lens = skb_network_offset(skb) <<
7252 IXGBE_ADVTXD_MACLEN_SHIFT;
7253 }
7254
7255 /* use first 4 bits to determine IP version */
7256 switch (network_hdr.ipv4->version) {
7257 case IPVERSION:
7258 vlan_macip_lens |= transport_hdr.raw - network_hdr.raw;
7259 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
7260 l4_hdr = network_hdr.ipv4->protocol;
7261 break;
7262 case 6:
7263 vlan_macip_lens |= transport_hdr.raw - network_hdr.raw;
7264 l4_hdr = network_hdr.ipv6->nexthdr;
7265 if (likely((transport_hdr.raw - network_hdr.raw) ==
7266 sizeof(struct ipv6hdr)))
7267 break;
7268 ipv6_skip_exthdr(skb, network_hdr.raw - skb->data +
7269 sizeof(struct ipv6hdr),
7270 &l4_hdr, &frag_off);
7271 if (unlikely(frag_off))
7272 l4_hdr = NEXTHDR_FRAGMENT;
7273 break;
7274 default:
7275 break;
7276 }
7277 7307
7278 switch (l4_hdr) { 7308 switch (skb->csum_offset) {
7279 case IPPROTO_TCP: 7309 case offsetof(struct tcphdr, check):
7280 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP; 7310 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
7281 mss_l4len_idx = (transport_hdr.tcphdr->doff * 4) << 7311 /* fall through */
7282 IXGBE_ADVTXD_L4LEN_SHIFT; 7312 case offsetof(struct udphdr, check):
7283 break; 7313 break;
7284 case IPPROTO_SCTP: 7314 case offsetof(struct sctphdr, checksum):
7285 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP; 7315 /* validate that this is actually an SCTP request */
7286 mss_l4len_idx = sizeof(struct sctphdr) << 7316 if (((first->protocol == htons(ETH_P_IP)) &&
7287 IXGBE_ADVTXD_L4LEN_SHIFT; 7317 (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
7288 break; 7318 ((first->protocol == htons(ETH_P_IPV6)) &&
7289 case IPPROTO_UDP: 7319 ixgbe_ipv6_csum_is_sctp(skb))) {
7290 mss_l4len_idx = sizeof(struct udphdr) << 7320 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
7291 IXGBE_ADVTXD_L4LEN_SHIFT;
7292 break; 7321 break;
7293 default:
7294 if (unlikely(net_ratelimit())) {
7295 dev_warn(tx_ring->dev,
7296 "partial checksum, version=%d, l4 proto=%x\n",
7297 network_hdr.ipv4->version, l4_hdr);
7298 }
7299 skb_checksum_help(skb);
7300 goto no_csum;
7301 } 7322 }
7302 7323 /* fall through */
7303 /* update TX checksum flag */ 7324 default:
7304 first->tx_flags |= IXGBE_TX_FLAGS_CSUM; 7325 skb_checksum_help(skb);
7326 goto csum_failed;
7305 } 7327 }
7306 7328
7329 /* update TX checksum flag */
7330 first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
7331 vlan_macip_lens = skb_checksum_start_offset(skb) -
7332 skb_network_offset(skb);
7307no_csum: 7333no_csum:
7308 /* vlan_macip_lens: MACLEN, VLAN tag */ 7334 /* vlan_macip_lens: MACLEN, VLAN tag */
7335 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
7309 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; 7336 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
7310 7337
7311 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, 7338 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, 0);
7312 type_tucmd, mss_l4len_idx);
7313} 7339}
7314 7340
7315#define IXGBE_SET_FLAG(_input, _flag, _result) \ 7341#define IXGBE_SET_FLAG(_input, _flag, _result) \
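The rewritten ixgbe_tx_csum() above no longer parses IP and L4 headers itself. It dispatches on skb->csum_offset (the offset of the checksum field inside the L4 header), handles TCP and UDP on a shared fast path, validates SCTP explicitly via the new ixgbe_ipv6_csum_is_sctp() helper, falls back to skb_checksum_help() for anything else, and derives MACLEN from skb_checksum_start_offset(). A compact userspace sketch of that offsetof-based dispatch, using simplified header structs rather than a real skb:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Simplified header layouts, just enough to show where the checksum
 * field sits; the driver uses the real struct tcphdr/udphdr/sctphdr. */
struct tcp_hdr  { uint16_t src, dst; uint32_t seq, ack; uint16_t flags, win, check, urg; };
struct udp_hdr  { uint16_t src, dst, len, check; };
struct sctp_hdr { uint16_t src, dst; uint32_t vtag, checksum; };

/* Dispatch on the checksum-field offset the stack reports, the way the
 * new ixgbe_tx_csum() does: TCP and UDP take the simple offload path,
 * SCTP needs an extra protocol check, everything else falls back to a
 * software checksum. */
static const char *csum_kind(size_t csum_offset)
{
    if (csum_offset == offsetof(struct tcp_hdr, check))
        return "TCP (hardware offload)";
    if (csum_offset == offsetof(struct udp_hdr, check))
        return "UDP (hardware offload)";
    if (csum_offset == offsetof(struct sctp_hdr, checksum))
        return "SCTP (verify the packet really is SCTP first)";
    return "unknown (software checksum fallback)";
}

int main(void)
{
    printf("%s\n", csum_kind(offsetof(struct tcp_hdr, check)));
    printf("%s\n", csum_kind(offsetof(struct sctp_hdr, checksum)));
    printf("%s\n", csum_kind(0));
    return 0;
}

The design point is that the stack has already located the checksum field when it requests CHECKSUM_PARTIAL, so re-walking IPv4 options or IPv6 extension headers in the driver is redundant work that the old code paid on every packet.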
@@ -7560,11 +7586,10 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
7560 struct ipv6hdr *ipv6; 7586 struct ipv6hdr *ipv6;
7561 } hdr; 7587 } hdr;
7562 struct tcphdr *th; 7588 struct tcphdr *th;
7589 unsigned int hlen;
7563 struct sk_buff *skb; 7590 struct sk_buff *skb;
7564#ifdef CONFIG_IXGBE_VXLAN
7565 u8 encap = false;
7566#endif /* CONFIG_IXGBE_VXLAN */
7567 __be16 vlan_id; 7591 __be16 vlan_id;
7592 int l4_proto;
7568 7593
7569 /* if ring doesn't have a interrupt vector, cannot perform ATR */ 7594 /* if ring doesn't have a interrupt vector, cannot perform ATR */
7570 if (!q_vector) 7595 if (!q_vector)
@@ -7576,62 +7601,50 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
7576 7601
7577 ring->atr_count++; 7602 ring->atr_count++;
7578 7603
7604 /* currently only IPv4/IPv6 with TCP is supported */
7605 if ((first->protocol != htons(ETH_P_IP)) &&
7606 (first->protocol != htons(ETH_P_IPV6)))
7607 return;
7608
7579 /* snag network header to get L4 type and address */ 7609 /* snag network header to get L4 type and address */
7580 skb = first->skb; 7610 skb = first->skb;
7581 hdr.network = skb_network_header(skb); 7611 hdr.network = skb_network_header(skb);
7582 if (!skb->encapsulation) {
7583 th = tcp_hdr(skb);
7584 } else {
7585#ifdef CONFIG_IXGBE_VXLAN 7612#ifdef CONFIG_IXGBE_VXLAN
7613 if (skb->encapsulation &&
7614 first->protocol == htons(ETH_P_IP) &&
7615 hdr.ipv4->protocol != IPPROTO_UDP) {
7586 struct ixgbe_adapter *adapter = q_vector->adapter; 7616 struct ixgbe_adapter *adapter = q_vector->adapter;
7587 7617
7588 if (!adapter->vxlan_port) 7618 /* verify the port is recognized as VXLAN */
7589 return; 7619 if (adapter->vxlan_port &&
7590 if (first->protocol != htons(ETH_P_IP) || 7620 udp_hdr(skb)->dest == adapter->vxlan_port)
7591 hdr.ipv4->version != IPVERSION || 7621 hdr.network = skb_inner_network_header(skb);
7592 hdr.ipv4->protocol != IPPROTO_UDP) {
7593 return;
7594 }
7595 if (ntohs(udp_hdr(skb)->dest) != adapter->vxlan_port)
7596 return;
7597 encap = true;
7598 hdr.network = skb_inner_network_header(skb);
7599 th = inner_tcp_hdr(skb);
7600#else
7601 return;
7602#endif /* CONFIG_IXGBE_VXLAN */
7603 } 7622 }
7623#endif /* CONFIG_IXGBE_VXLAN */
7604 7624
7605 /* Currently only IPv4/IPv6 with TCP is supported */ 7625 /* Currently only IPv4/IPv6 with TCP is supported */
7606 switch (hdr.ipv4->version) { 7626 switch (hdr.ipv4->version) {
7607 case IPVERSION: 7627 case IPVERSION:
7608 if (hdr.ipv4->protocol != IPPROTO_TCP) 7628 /* access ihl as u8 to avoid unaligned access on ia64 */
7609 return; 7629 hlen = (hdr.network[0] & 0x0F) << 2;
7630 l4_proto = hdr.ipv4->protocol;
7610 break; 7631 break;
7611 case 6: 7632 case 6:
7612 if (likely((unsigned char *)th - hdr.network == 7633 hlen = hdr.network - skb->data;
7613 sizeof(struct ipv6hdr))) { 7634 l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL);
7614 if (hdr.ipv6->nexthdr != IPPROTO_TCP) 7635 hlen -= hdr.network - skb->data;
7615 return;
7616 } else {
7617 __be16 frag_off;
7618 u8 l4_hdr;
7619
7620 ipv6_skip_exthdr(skb, hdr.network - skb->data +
7621 sizeof(struct ipv6hdr),
7622 &l4_hdr, &frag_off);
7623 if (unlikely(frag_off))
7624 return;
7625 if (l4_hdr != IPPROTO_TCP)
7626 return;
7627 }
7628 break; 7636 break;
7629 default: 7637 default:
7630 return; 7638 return;
7631 } 7639 }
7632 7640
7633 /* skip this packet since it is invalid or the socket is closing */ 7641 if (l4_proto != IPPROTO_TCP)
7634 if (!th || th->fin) 7642 return;
7643
7644 th = (struct tcphdr *)(hdr.network + hlen);
7645
7646 /* skip this packet since the socket is closing */
7647 if (th->fin)
7635 return; 7648 return;
7636 7649
7637 /* sample on all syn packets or once every atr sample count */ 7650 /* sample on all syn packets or once every atr sample count */
@@ -7682,10 +7695,8 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
7682 break; 7695 break;
7683 } 7696 }
7684 7697
7685#ifdef CONFIG_IXGBE_VXLAN 7698 if (hdr.network != skb_network_header(skb))
7686 if (encap)
7687 input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK; 7699 input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK;
7688#endif /* CONFIG_IXGBE_VXLAN */
7689 7700
7690 /* This assumes the Rx queue and Tx queue are bound to the same CPU */ 7701 /* This assumes the Rx queue and Tx queue are bound to the same CPU */
7691 ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw, 7702 ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
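The reworked ixgbe_atr() above stops relying on a pre-parsed TCP header pointer and computes the L3 header length itself: for IPv4 it reads the IHL nibble as a single byte ((hdr.network[0] & 0x0F) << 2) to avoid unaligned loads, for IPv6 it lets ipv6_find_hdr() walk extension headers, and only then casts hdr.network + hlen to a struct tcphdr. A small sketch of the IPv4 part of that calculation on a raw byte buffer (illustrative bytes, not a captured packet):

#include <stdio.h>
#include <stdint.h>

/* Compute the IPv4 header length from the raw first byte: the low nibble
 * is IHL in 32-bit words, so shift left by 2 to get bytes. Reading a
 * single byte sidesteps unaligned 16/32-bit loads on strict-alignment
 * architectures (the driver comment calls out ia64). */
static unsigned int ipv4_hlen(const uint8_t *network)
{
    return (network[0] & 0x0F) << 2;
}

int main(void)
{
    uint8_t no_opts[]   = { 0x45 };   /* version 4, IHL 5  -> 20 bytes */
    uint8_t with_opts[] = { 0x48 };   /* version 4, IHL 8  -> 32 bytes */

    printf("hlen = %u\n", ipv4_hlen(no_opts));
    printf("hlen = %u\n", ipv4_hlen(with_opts));

    /* The TCP header would then start at network + hlen, e.g.
     *   struct tcphdr *th = (struct tcphdr *)(network + hlen);       */
    return 0;
}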
@@ -8209,10 +8220,17 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
8209static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter, 8220static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter,
8210 struct tc_cls_u32_offload *cls) 8221 struct tc_cls_u32_offload *cls)
8211{ 8222{
8223 u32 uhtid = TC_U32_USERHTID(cls->knode.handle);
8224 u32 loc;
8212 int err; 8225 int err;
8213 8226
8227 if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE))
8228 return -EINVAL;
8229
8230 loc = cls->knode.handle & 0xfffff;
8231
8214 spin_lock(&adapter->fdir_perfect_lock); 8232 spin_lock(&adapter->fdir_perfect_lock);
8215 err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, cls->knode.handle); 8233 err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc);
8216 spin_unlock(&adapter->fdir_perfect_lock); 8234 spin_unlock(&adapter->fdir_perfect_lock);
8217 return err; 8235 return err;
8218} 8236}
@@ -8221,20 +8239,30 @@ static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter,
8221 __be16 protocol, 8239 __be16 protocol,
8222 struct tc_cls_u32_offload *cls) 8240 struct tc_cls_u32_offload *cls)
8223{ 8241{
8242 u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
8243
8244 if (uhtid >= IXGBE_MAX_LINK_HANDLE)
8245 return -EINVAL;
8246
8224 /* This ixgbe devices do not support hash tables at the moment 8247 /* This ixgbe devices do not support hash tables at the moment
8225 * so abort when given hash tables. 8248 * so abort when given hash tables.
8226 */ 8249 */
8227 if (cls->hnode.divisor > 0) 8250 if (cls->hnode.divisor > 0)
8228 return -EINVAL; 8251 return -EINVAL;
8229 8252
8230 set_bit(TC_U32_USERHTID(cls->hnode.handle), &adapter->tables); 8253 set_bit(uhtid - 1, &adapter->tables);
8231 return 0; 8254 return 0;
8232} 8255}
8233 8256
8234static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter, 8257static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter,
8235 struct tc_cls_u32_offload *cls) 8258 struct tc_cls_u32_offload *cls)
8236{ 8259{
8237 clear_bit(TC_U32_USERHTID(cls->hnode.handle), &adapter->tables); 8260 u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
8261
8262 if (uhtid >= IXGBE_MAX_LINK_HANDLE)
8263 return -EINVAL;
8264
8265 clear_bit(uhtid - 1, &adapter->tables);
8238 return 0; 8266 return 0;
8239} 8267}
8240 8268
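The cls_u32 hunks above stop treating the raw 32-bit knode/hnode handle as an index. They extract the user hash-table id with TC_U32_USERHTID(), range-check it against IXGBE_MAX_LINK_HANDLE, special-case the root table 0x800, and use uhtid - 1 as the bit index into adapter->tables, with loc = handle & 0xfffff identifying the node itself. A hedged sketch of that handle decoding, assuming the usual u32 layout of the htid in the upper 12 bits:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define MAX_LINK_HANDLE 10u            /* stand-in for IXGBE_MAX_LINK_HANDLE */

/* Assumed u32 handle layout: user htid in bits 20..31, node id below. */
static uint32_t user_htid(uint32_t handle) { return handle >> 20; }
static uint32_t node_loc(uint32_t handle)  { return handle & 0xFFFFF; }

static bool htid_ok(uint32_t uhtid)
{
    /* 0x800 is the root table; anything else must fit the link-table range. */
    return uhtid == 0x800 || uhtid < MAX_LINK_HANDLE;
}

int main(void)
{
    uint32_t handle = (0x800u << 20) | 0x1;   /* root table, node 1 */
    printf("uhtid=0x%x loc=0x%x ok=%d\n",
           user_htid(handle), node_loc(handle), htid_ok(user_htid(handle)));

    handle = (2u << 20) | 0x10;               /* link table 2 -> bit index 1 */
    printf("uhtid=%u bit=%u ok=%d\n",
           user_htid(handle), user_htid(handle) - 1, htid_ok(user_htid(handle)));
    return 0;
}

Without the range check, an unvalidated handle could index past the small adapter->tables / jump_tables arrays, which is presumably what the added -EINVAL paths are guarding against.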
@@ -8252,46 +8280,46 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
8252#endif 8280#endif
8253 int i, err = 0; 8281 int i, err = 0;
8254 u8 queue; 8282 u8 queue;
8255 u32 handle; 8283 u32 uhtid, link_uhtid;
8256 8284
8257 memset(&mask, 0, sizeof(union ixgbe_atr_input)); 8285 memset(&mask, 0, sizeof(union ixgbe_atr_input));
8258 handle = cls->knode.handle; 8286 uhtid = TC_U32_USERHTID(cls->knode.handle);
8287 link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
8259 8288
8260 /* At the moment cls_u32 jumps to transport layer and skips past 8289 /* At the moment cls_u32 jumps to network layer and skips past
8261 * L2 headers. The canonical method to match L2 frames is to use 8290 * L2 headers. The canonical method to match L2 frames is to use
8262 * negative values. However this is error prone at best but really 8291 * negative values. However this is error prone at best but really
8263 * just broken because there is no way to "know" what sort of hdr 8292 * just broken because there is no way to "know" what sort of hdr
8264 * is in front of the transport layer. Fix cls_u32 to support L2 8293 * is in front of the network layer. Fix cls_u32 to support L2
8265 * headers when needed. 8294 * headers when needed.
8266 */ 8295 */
8267 if (protocol != htons(ETH_P_IP)) 8296 if (protocol != htons(ETH_P_IP))
8268 return -EINVAL; 8297 return -EINVAL;
8269 8298
8270 if (cls->knode.link_handle || 8299 if (link_uhtid) {
8271 cls->knode.link_handle >= IXGBE_MAX_LINK_HANDLE) {
8272 struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps; 8300 struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps;
8273 u32 uhtid = TC_U32_USERHTID(cls->knode.link_handle);
8274 8301
8275 if (!test_bit(uhtid, &adapter->tables)) 8302 if (link_uhtid >= IXGBE_MAX_LINK_HANDLE)
8303 return -EINVAL;
8304
8305 if (!test_bit(link_uhtid - 1, &adapter->tables))
8276 return -EINVAL; 8306 return -EINVAL;
8277 8307
8278 for (i = 0; nexthdr[i].jump; i++) { 8308 for (i = 0; nexthdr[i].jump; i++) {
8279 if (nexthdr->o != cls->knode.sel->offoff || 8309 if (nexthdr[i].o != cls->knode.sel->offoff ||
8280 nexthdr->s != cls->knode.sel->offshift || 8310 nexthdr[i].s != cls->knode.sel->offshift ||
8281 nexthdr->m != cls->knode.sel->offmask || 8311 nexthdr[i].m != cls->knode.sel->offmask ||
8282 /* do not support multiple key jumps its just mad */ 8312 /* do not support multiple key jumps its just mad */
8283 cls->knode.sel->nkeys > 1) 8313 cls->knode.sel->nkeys > 1)
8284 return -EINVAL; 8314 return -EINVAL;
8285 8315
8286 if (nexthdr->off != cls->knode.sel->keys[0].off || 8316 if (nexthdr[i].off == cls->knode.sel->keys[0].off &&
8287 nexthdr->val != cls->knode.sel->keys[0].val || 8317 nexthdr[i].val == cls->knode.sel->keys[0].val &&
8288 nexthdr->mask != cls->knode.sel->keys[0].mask) 8318 nexthdr[i].mask == cls->knode.sel->keys[0].mask) {
8289 return -EINVAL; 8319 adapter->jump_tables[link_uhtid] =
8290 8320 nexthdr[i].jump;
8291 if (uhtid >= IXGBE_MAX_LINK_HANDLE) 8321 break;
8292 return -EINVAL; 8322 }
8293
8294 adapter->jump_tables[uhtid] = nexthdr->jump;
8295 } 8323 }
8296 return 0; 8324 return 0;
8297 } 8325 }
@@ -8308,13 +8336,13 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
8308 * To add support for new nodes update ixgbe_model.h parse structures 8336 * To add support for new nodes update ixgbe_model.h parse structures
8309 * this function _should_ be generic try not to hardcode values here. 8337 * this function _should_ be generic try not to hardcode values here.
8310 */ 8338 */
8311 if (TC_U32_USERHTID(handle) == 0x800) { 8339 if (uhtid == 0x800) {
8312 field_ptr = adapter->jump_tables[0]; 8340 field_ptr = adapter->jump_tables[0];
8313 } else { 8341 } else {
8314 if (TC_U32_USERHTID(handle) >= ARRAY_SIZE(adapter->jump_tables)) 8342 if (uhtid >= IXGBE_MAX_LINK_HANDLE)
8315 return -EINVAL; 8343 return -EINVAL;
8316 8344
8317 field_ptr = adapter->jump_tables[TC_U32_USERHTID(handle)]; 8345 field_ptr = adapter->jump_tables[uhtid];
8318 } 8346 }
8319 8347
8320 if (!field_ptr) 8348 if (!field_ptr)
@@ -8332,8 +8360,7 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
8332 int j; 8360 int j;
8333 8361
8334 for (j = 0; field_ptr[j].val; j++) { 8362 for (j = 0; field_ptr[j].val; j++) {
8335 if (field_ptr[j].off == off && 8363 if (field_ptr[j].off == off) {
8336 field_ptr[j].mask == m) {
8337 field_ptr[j].val(input, &mask, val, m); 8364 field_ptr[j].val(input, &mask, val, m);
8338 input->filter.formatted.flow_type |= 8365 input->filter.formatted.flow_type |=
8339 field_ptr[j].type; 8366 field_ptr[j].type;
@@ -8393,8 +8420,8 @@ err_out:
8393 return -EINVAL; 8420 return -EINVAL;
8394} 8421}
8395 8422
8396int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto, 8423static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
8397 struct tc_to_netdev *tc) 8424 struct tc_to_netdev *tc)
8398{ 8425{
8399 struct ixgbe_adapter *adapter = netdev_priv(dev); 8426 struct ixgbe_adapter *adapter = netdev_priv(dev);
8400 8427
@@ -8517,11 +8544,6 @@ static int ixgbe_set_features(struct net_device *netdev,
8517 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; 8544 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
8518 } 8545 }
8519 8546
8520 if (features & NETIF_F_HW_VLAN_CTAG_RX)
8521 ixgbe_vlan_strip_enable(adapter);
8522 else
8523 ixgbe_vlan_strip_disable(adapter);
8524
8525 if (changed & NETIF_F_RXALL) 8547 if (changed & NETIF_F_RXALL)
8526 need_reset = true; 8548 need_reset = true;
8527 8549
@@ -8538,6 +8560,9 @@ static int ixgbe_set_features(struct net_device *netdev,
8538 8560
8539 if (need_reset) 8561 if (need_reset)
8540 ixgbe_do_reset(netdev); 8562 ixgbe_do_reset(netdev);
8563 else if (changed & (NETIF_F_HW_VLAN_CTAG_RX |
8564 NETIF_F_HW_VLAN_CTAG_FILTER))
8565 ixgbe_set_rx_mode(netdev);
8541 8566
8542 return 0; 8567 return 0;
8543} 8568}
@@ -8554,7 +8579,6 @@ static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
8554{ 8579{
8555 struct ixgbe_adapter *adapter = netdev_priv(dev); 8580 struct ixgbe_adapter *adapter = netdev_priv(dev);
8556 struct ixgbe_hw *hw = &adapter->hw; 8581 struct ixgbe_hw *hw = &adapter->hw;
8557 u16 new_port = ntohs(port);
8558 8582
8559 if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) 8583 if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
8560 return; 8584 return;
@@ -8562,18 +8586,18 @@ static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
8562 if (sa_family == AF_INET6) 8586 if (sa_family == AF_INET6)
8563 return; 8587 return;
8564 8588
8565 if (adapter->vxlan_port == new_port) 8589 if (adapter->vxlan_port == port)
8566 return; 8590 return;
8567 8591
8568 if (adapter->vxlan_port) { 8592 if (adapter->vxlan_port) {
8569 netdev_info(dev, 8593 netdev_info(dev,
8570 "Hit Max num of VXLAN ports, not adding port %d\n", 8594 "Hit Max num of VXLAN ports, not adding port %d\n",
8571 new_port); 8595 ntohs(port));
8572 return; 8596 return;
8573 } 8597 }
8574 8598
8575 adapter->vxlan_port = new_port; 8599 adapter->vxlan_port = port;
8576 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, new_port); 8600 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, ntohs(port));
8577} 8601}
8578 8602
8579/** 8603/**
@@ -8586,7 +8610,6 @@ static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
8586 __be16 port) 8610 __be16 port)
8587{ 8611{
8588 struct ixgbe_adapter *adapter = netdev_priv(dev); 8612 struct ixgbe_adapter *adapter = netdev_priv(dev);
8589 u16 new_port = ntohs(port);
8590 8613
8591 if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) 8614 if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
8592 return; 8615 return;
@@ -8594,9 +8617,9 @@ static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
8594 if (sa_family == AF_INET6) 8617 if (sa_family == AF_INET6)
8595 return; 8618 return;
8596 8619
8597 if (adapter->vxlan_port != new_port) { 8620 if (adapter->vxlan_port != port) {
8598 netdev_info(dev, "Port %d was not found, not deleting\n", 8621 netdev_info(dev, "Port %d was not found, not deleting\n",
8599 new_port); 8622 ntohs(port));
8600 return; 8623 return;
8601 } 8624 }
8602 8625
@@ -8862,6 +8885,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
8862 .ndo_set_mac_address = ixgbe_set_mac, 8885 .ndo_set_mac_address = ixgbe_set_mac,
8863 .ndo_change_mtu = ixgbe_change_mtu, 8886 .ndo_change_mtu = ixgbe_change_mtu,
8864 .ndo_tx_timeout = ixgbe_tx_timeout, 8887 .ndo_tx_timeout = ixgbe_tx_timeout,
8888 .ndo_set_tx_maxrate = ixgbe_tx_maxrate,
8865 .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid, 8889 .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
8866 .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid, 8890 .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
8867 .ndo_do_ioctl = ixgbe_ioctl, 8891 .ndo_do_ioctl = ixgbe_ioctl,
@@ -8999,6 +9023,7 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
8999 case IXGBE_DEV_ID_X540T: 9023 case IXGBE_DEV_ID_X540T:
9000 case IXGBE_DEV_ID_X540T1: 9024 case IXGBE_DEV_ID_X540T1:
9001 case IXGBE_DEV_ID_X550T: 9025 case IXGBE_DEV_ID_X550T:
9026 case IXGBE_DEV_ID_X550T1:
9002 case IXGBE_DEV_ID_X550EM_X_KX4: 9027 case IXGBE_DEV_ID_X550EM_X_KX4:
9003 case IXGBE_DEV_ID_X550EM_X_KR: 9028 case IXGBE_DEV_ID_X550EM_X_KR:
9004 case IXGBE_DEV_ID_X550EM_X_10G_T: 9029 case IXGBE_DEV_ID_X550EM_X_10G_T:
@@ -9015,29 +9040,6 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
9015} 9040}
9016 9041
9017/** 9042/**
9018 * ixgbe_get_platform_mac_addr - Look up MAC address in Open Firmware / IDPROM
9019 * @adapter: Pointer to adapter struct
9020 */
9021static void ixgbe_get_platform_mac_addr(struct ixgbe_adapter *adapter)
9022{
9023#ifdef CONFIG_OF
9024 struct device_node *dp = pci_device_to_OF_node(adapter->pdev);
9025 struct ixgbe_hw *hw = &adapter->hw;
9026 const unsigned char *addr;
9027
9028 addr = of_get_mac_address(dp);
9029 if (addr) {
9030 ether_addr_copy(hw->mac.perm_addr, addr);
9031 return;
9032 }
9033#endif /* CONFIG_OF */
9034
9035#ifdef CONFIG_SPARC
9036 ether_addr_copy(hw->mac.perm_addr, idprom->id_ethaddr);
9037#endif /* CONFIG_SPARC */
9038}
9039
9040/**
9041 * ixgbe_probe - Device Initialization Routine 9043 * ixgbe_probe - Device Initialization Routine
9042 * @pdev: PCI device information struct 9044 * @pdev: PCI device information struct
9043 * @ent: entry in ixgbe_pci_tbl 9045 * @ent: entry in ixgbe_pci_tbl
@@ -9140,12 +9142,12 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
9140 strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); 9142 strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
9141 9143
9142 /* Setup hw api */ 9144 /* Setup hw api */
9143 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); 9145 hw->mac.ops = *ii->mac_ops;
9144 hw->mac.type = ii->mac; 9146 hw->mac.type = ii->mac;
9145 hw->mvals = ii->mvals; 9147 hw->mvals = ii->mvals;
9146 9148
9147 /* EEPROM */ 9149 /* EEPROM */
9148 memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops)); 9150 hw->eeprom.ops = *ii->eeprom_ops;
9149 eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); 9151 eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
9150 if (ixgbe_removed(hw->hw_addr)) { 9152 if (ixgbe_removed(hw->hw_addr)) {
9151 err = -EIO; 9153 err = -EIO;
@@ -9156,7 +9158,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
9156 hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic; 9158 hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
9157 9159
9158 /* PHY */ 9160 /* PHY */
9159 memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops)); 9161 hw->phy.ops = *ii->phy_ops;
9160 hw->phy.sfp_type = ixgbe_sfp_type_unknown; 9162 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
9161 /* ixgbe_identify_phy_generic will set prtad and mmds properly */ 9163 /* ixgbe_identify_phy_generic will set prtad and mmds properly */
9162 hw->phy.mdio.prtad = MDIO_PRTAD_NONE; 9164 hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
@@ -9173,12 +9175,17 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
9173 if (err) 9175 if (err)
9174 goto err_sw_init; 9176 goto err_sw_init;
9175 9177
9178 /* Make sure the SWFW semaphore is in a valid state */
9179 if (hw->mac.ops.init_swfw_sync)
9180 hw->mac.ops.init_swfw_sync(hw);
9181
9176 /* Make it possible the adapter to be woken up via WOL */ 9182 /* Make it possible the adapter to be woken up via WOL */
9177 switch (adapter->hw.mac.type) { 9183 switch (adapter->hw.mac.type) {
9178 case ixgbe_mac_82599EB: 9184 case ixgbe_mac_82599EB:
9179 case ixgbe_mac_X540: 9185 case ixgbe_mac_X540:
9180 case ixgbe_mac_X550: 9186 case ixgbe_mac_X550:
9181 case ixgbe_mac_X550EM_x: 9187 case ixgbe_mac_X550EM_x:
9188 case ixgbe_mac_x550em_a:
9182 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); 9189 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
9183 break; 9190 break;
9184 default: 9191 default:
@@ -9219,63 +9226,46 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
9219 goto skip_sriov; 9226 goto skip_sriov;
9220 /* Mailbox */ 9227 /* Mailbox */
9221 ixgbe_init_mbx_params_pf(hw); 9228 ixgbe_init_mbx_params_pf(hw);
9222 memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops)); 9229 hw->mbx.ops = ii->mbx_ops;
9223 pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT); 9230 pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT);
9224 ixgbe_enable_sriov(adapter); 9231 ixgbe_enable_sriov(adapter);
9225skip_sriov: 9232skip_sriov:
9226 9233
9227#endif 9234#endif
9228 netdev->features = NETIF_F_SG | 9235 netdev->features = NETIF_F_SG |
9229 NETIF_F_IP_CSUM |
9230 NETIF_F_IPV6_CSUM |
9231 NETIF_F_HW_VLAN_CTAG_TX |
9232 NETIF_F_HW_VLAN_CTAG_RX |
9233 NETIF_F_TSO | 9236 NETIF_F_TSO |
9234 NETIF_F_TSO6 | 9237 NETIF_F_TSO6 |
9235 NETIF_F_RXHASH | 9238 NETIF_F_RXHASH |
9236 NETIF_F_RXCSUM; 9239 NETIF_F_RXCSUM |
9237 9240 NETIF_F_HW_CSUM |
9238 netdev->hw_features = netdev->features | NETIF_F_HW_L2FW_DOFFLOAD; 9241 NETIF_F_HW_VLAN_CTAG_TX |
9242 NETIF_F_HW_VLAN_CTAG_RX |
9243 NETIF_F_HW_VLAN_CTAG_FILTER;
9239 9244
9240 switch (adapter->hw.mac.type) { 9245 if (hw->mac.type >= ixgbe_mac_82599EB)
9241 case ixgbe_mac_82599EB:
9242 case ixgbe_mac_X540:
9243 case ixgbe_mac_X550:
9244 case ixgbe_mac_X550EM_x:
9245 netdev->features |= NETIF_F_SCTP_CRC; 9246 netdev->features |= NETIF_F_SCTP_CRC;
9246 netdev->hw_features |= NETIF_F_SCTP_CRC |
9247 NETIF_F_NTUPLE |
9248 NETIF_F_HW_TC;
9249 break;
9250 default:
9251 break;
9252 }
9253 9247
9254 netdev->hw_features |= NETIF_F_RXALL; 9248 /* copy netdev features into list of user selectable features */
9255 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 9249 netdev->hw_features |= netdev->features;
9250 netdev->hw_features |= NETIF_F_RXALL |
9251 NETIF_F_HW_L2FW_DOFFLOAD;
9256 9252
9257 netdev->vlan_features |= NETIF_F_TSO; 9253 if (hw->mac.type >= ixgbe_mac_82599EB)
9258 netdev->vlan_features |= NETIF_F_TSO6; 9254 netdev->hw_features |= NETIF_F_NTUPLE |
9259 netdev->vlan_features |= NETIF_F_IP_CSUM; 9255 NETIF_F_HW_TC;
9260 netdev->vlan_features |= NETIF_F_IPV6_CSUM; 9256
9261 netdev->vlan_features |= NETIF_F_SG; 9257 netdev->vlan_features |= NETIF_F_SG |
9258 NETIF_F_TSO |
9259 NETIF_F_TSO6 |
9260 NETIF_F_HW_CSUM |
9261 NETIF_F_SCTP_CRC;
9262 9262
9263 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 9263 netdev->mpls_features |= NETIF_F_HW_CSUM;
9264 netdev->hw_enc_features |= NETIF_F_HW_CSUM;
9264 9265
9265 netdev->priv_flags |= IFF_UNICAST_FLT; 9266 netdev->priv_flags |= IFF_UNICAST_FLT;
9266 netdev->priv_flags |= IFF_SUPP_NOFCS; 9267 netdev->priv_flags |= IFF_SUPP_NOFCS;
9267 9268
9268#ifdef CONFIG_IXGBE_VXLAN
9269 switch (adapter->hw.mac.type) {
9270 case ixgbe_mac_X550:
9271 case ixgbe_mac_X550EM_x:
9272 netdev->hw_enc_features |= NETIF_F_RXCSUM;
9273 break;
9274 default:
9275 break;
9276 }
9277#endif /* CONFIG_IXGBE_VXLAN */
9278
9279#ifdef CONFIG_IXGBE_DCB 9269#ifdef CONFIG_IXGBE_DCB
9280 netdev->dcbnl_ops = &dcbnl_ops; 9270 netdev->dcbnl_ops = &dcbnl_ops;
9281#endif 9271#endif
@@ -9319,7 +9309,8 @@ skip_sriov:
9319 goto err_sw_init; 9309 goto err_sw_init;
9320 } 9310 }
9321 9311
9322 ixgbe_get_platform_mac_addr(adapter); 9312 eth_platform_get_mac_address(&adapter->pdev->dev,
9313 adapter->hw.mac.perm_addr);
9323 9314
9324 memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len); 9315 memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
9325 9316
@@ -9329,6 +9320,8 @@ skip_sriov:
9329 goto err_sw_init; 9320 goto err_sw_init;
9330 } 9321 }
9331 9322
9323 /* Set hw->mac.addr to permanent MAC address */
9324 ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
9332 ixgbe_mac_set_default_filter(adapter); 9325 ixgbe_mac_set_default_filter(adapter);
9333 9326
9334 setup_timer(&adapter->service_timer, &ixgbe_service_timer, 9327 setup_timer(&adapter->service_timer, &ixgbe_service_timer,
@@ -9625,6 +9618,9 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
9625 case ixgbe_mac_X550EM_x: 9618 case ixgbe_mac_X550EM_x:
9626 device_id = IXGBE_DEV_ID_X550EM_X_VF; 9619 device_id = IXGBE_DEV_ID_X550EM_X_VF;
9627 break; 9620 break;
9621 case ixgbe_mac_x550em_a:
9622 device_id = IXGBE_DEV_ID_X550EM_A_VF;
9623 break;
9628 default: 9624 default:
9629 device_id = 0; 9625 device_id = 0;
9630 break; 9626 break;
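The VXLAN hunks above drop the intermediate host-order copy (new_port) and keep adapter->vxlan_port in network byte order, calling ntohs() only where a host-order value is actually needed (the log message and the register write). A minimal userspace sketch of the same pattern, with made-up names, assuming the port arrives already in network byte order:

/* Illustrative sketch: store the offloaded port as handed in (network
 * byte order) and byte-swap only at the point of use.
 */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

struct tunnel_offload {
	uint16_t port_be;	/* network byte order */
};

static void tunnel_add_port(struct tunnel_offload *t, uint16_t port_be)
{
	if (t->port_be == port_be)	/* same representation: compare directly */
		return;

	if (t->port_be) {
		printf("offload slot busy, not adding port %d\n",
		       ntohs(port_be));
		return;
	}

	t->port_be = port_be;
	/* a register write would pass ntohs(port_be) to the hardware here */
}

Keeping the stored value in a single representation avoids double-swap bugs and makes the equality checks byte-order free.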
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index 9993a471d668..b2125e358f7b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2014 Intel Corporation. 4 Copyright(c) 1999 - 2016 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -48,10 +48,10 @@ s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
48 if (size > mbx->size) 48 if (size > mbx->size)
49 size = mbx->size; 49 size = mbx->size;
50 50
51 if (!mbx->ops.read) 51 if (!mbx->ops)
52 return IXGBE_ERR_MBX; 52 return IXGBE_ERR_MBX;
53 53
54 return mbx->ops.read(hw, msg, size, mbx_id); 54 return mbx->ops->read(hw, msg, size, mbx_id);
55} 55}
56 56
57/** 57/**
@@ -70,10 +70,10 @@ s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
70 if (size > mbx->size) 70 if (size > mbx->size)
71 return IXGBE_ERR_MBX; 71 return IXGBE_ERR_MBX;
72 72
73 if (!mbx->ops.write) 73 if (!mbx->ops)
74 return IXGBE_ERR_MBX; 74 return IXGBE_ERR_MBX;
75 75
76 return mbx->ops.write(hw, msg, size, mbx_id); 76 return mbx->ops->write(hw, msg, size, mbx_id);
77} 77}
78 78
79/** 79/**
@@ -87,10 +87,10 @@ s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
87{ 87{
88 struct ixgbe_mbx_info *mbx = &hw->mbx; 88 struct ixgbe_mbx_info *mbx = &hw->mbx;
89 89
90 if (!mbx->ops.check_for_msg) 90 if (!mbx->ops)
91 return IXGBE_ERR_MBX; 91 return IXGBE_ERR_MBX;
92 92
93 return mbx->ops.check_for_msg(hw, mbx_id); 93 return mbx->ops->check_for_msg(hw, mbx_id);
94} 94}
95 95
96/** 96/**
@@ -104,10 +104,10 @@ s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
104{ 104{
105 struct ixgbe_mbx_info *mbx = &hw->mbx; 105 struct ixgbe_mbx_info *mbx = &hw->mbx;
106 106
107 if (!mbx->ops.check_for_ack) 107 if (!mbx->ops)
108 return IXGBE_ERR_MBX; 108 return IXGBE_ERR_MBX;
109 109
110 return mbx->ops.check_for_ack(hw, mbx_id); 110 return mbx->ops->check_for_ack(hw, mbx_id);
111} 111}
112 112
113/** 113/**
@@ -121,10 +121,10 @@ s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
121{ 121{
122 struct ixgbe_mbx_info *mbx = &hw->mbx; 122 struct ixgbe_mbx_info *mbx = &hw->mbx;
123 123
124 if (!mbx->ops.check_for_rst) 124 if (!mbx->ops)
125 return IXGBE_ERR_MBX; 125 return IXGBE_ERR_MBX;
126 126
127 return mbx->ops.check_for_rst(hw, mbx_id); 127 return mbx->ops->check_for_rst(hw, mbx_id);
128} 128}
129 129
130/** 130/**
@@ -139,10 +139,10 @@ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
139 struct ixgbe_mbx_info *mbx = &hw->mbx; 139 struct ixgbe_mbx_info *mbx = &hw->mbx;
140 int countdown = mbx->timeout; 140 int countdown = mbx->timeout;
141 141
142 if (!countdown || !mbx->ops.check_for_msg) 142 if (!countdown || !mbx->ops)
143 return IXGBE_ERR_MBX; 143 return IXGBE_ERR_MBX;
144 144
145 while (mbx->ops.check_for_msg(hw, mbx_id)) { 145 while (mbx->ops->check_for_msg(hw, mbx_id)) {
146 countdown--; 146 countdown--;
147 if (!countdown) 147 if (!countdown)
148 return IXGBE_ERR_MBX; 148 return IXGBE_ERR_MBX;
@@ -164,10 +164,10 @@ static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
164 struct ixgbe_mbx_info *mbx = &hw->mbx; 164 struct ixgbe_mbx_info *mbx = &hw->mbx;
165 int countdown = mbx->timeout; 165 int countdown = mbx->timeout;
166 166
167 if (!countdown || !mbx->ops.check_for_ack) 167 if (!countdown || !mbx->ops)
168 return IXGBE_ERR_MBX; 168 return IXGBE_ERR_MBX;
169 169
170 while (mbx->ops.check_for_ack(hw, mbx_id)) { 170 while (mbx->ops->check_for_ack(hw, mbx_id)) {
171 countdown--; 171 countdown--;
172 if (!countdown) 172 if (!countdown)
173 return IXGBE_ERR_MBX; 173 return IXGBE_ERR_MBX;
@@ -193,7 +193,7 @@ static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
193 struct ixgbe_mbx_info *mbx = &hw->mbx; 193 struct ixgbe_mbx_info *mbx = &hw->mbx;
194 s32 ret_val; 194 s32 ret_val;
195 195
196 if (!mbx->ops.read) 196 if (!mbx->ops)
197 return IXGBE_ERR_MBX; 197 return IXGBE_ERR_MBX;
198 198
199 ret_val = ixgbe_poll_for_msg(hw, mbx_id); 199 ret_val = ixgbe_poll_for_msg(hw, mbx_id);
@@ -201,7 +201,7 @@ static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
201 return ret_val; 201 return ret_val;
202 202
203 /* if ack received read message */ 203 /* if ack received read message */
204 return mbx->ops.read(hw, msg, size, mbx_id); 204 return mbx->ops->read(hw, msg, size, mbx_id);
205} 205}
206 206
207/** 207/**
@@ -221,11 +221,11 @@ static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
221 s32 ret_val; 221 s32 ret_val;
222 222
223 /* exit if either we can't write or there isn't a defined timeout */ 223 /* exit if either we can't write or there isn't a defined timeout */
224 if (!mbx->ops.write || !mbx->timeout) 224 if (!mbx->ops || !mbx->timeout)
225 return IXGBE_ERR_MBX; 225 return IXGBE_ERR_MBX;
226 226
227 /* send msg */ 227 /* send msg */
228 ret_val = mbx->ops.write(hw, msg, size, mbx_id); 228 ret_val = mbx->ops->write(hw, msg, size, mbx_id);
229 if (ret_val) 229 if (ret_val)
230 return ret_val; 230 return ret_val;
231 231
@@ -307,6 +307,7 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
307 case ixgbe_mac_X540: 307 case ixgbe_mac_X540:
308 case ixgbe_mac_X550: 308 case ixgbe_mac_X550:
309 case ixgbe_mac_X550EM_x: 309 case ixgbe_mac_X550EM_x:
310 case ixgbe_mac_x550em_a:
310 vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset)); 311 vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
311 break; 312 break;
312 default: 313 default:
@@ -430,6 +431,7 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
430 if (hw->mac.type != ixgbe_mac_82599EB && 431 if (hw->mac.type != ixgbe_mac_82599EB &&
431 hw->mac.type != ixgbe_mac_X550 && 432 hw->mac.type != ixgbe_mac_X550 &&
432 hw->mac.type != ixgbe_mac_X550EM_x && 433 hw->mac.type != ixgbe_mac_X550EM_x &&
434 hw->mac.type != ixgbe_mac_x550em_a &&
433 hw->mac.type != ixgbe_mac_X540) 435 hw->mac.type != ixgbe_mac_X540)
434 return; 436 return;
435 437
@@ -446,7 +448,7 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
446} 448}
447#endif /* CONFIG_PCI_IOV */ 449#endif /* CONFIG_PCI_IOV */
448 450
449struct ixgbe_mbx_operations mbx_ops_generic = { 451const struct ixgbe_mbx_operations mbx_ops_generic = {
450 .read = ixgbe_read_mbx_pf, 452 .read = ixgbe_read_mbx_pf,
451 .write = ixgbe_write_mbx_pf, 453 .write = ixgbe_write_mbx_pf,
452 .read_posted = ixgbe_read_posted_mbx, 454 .read_posted = ixgbe_read_posted_mbx,
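The mailbox conversion above turns the embedded struct ixgbe_mbx_operations into a pointer to a shared const table, so a single !mbx->ops test replaces the per-callback NULL checks and the ops table itself can live in read-only data. A minimal sketch of the pattern (names are stand-ins, not the ixgbe types):

/* Illustrative sketch: one NULL check on a const ops pointer instead of
 * checking each callback of an embedded ops struct.
 */
struct mbx_ops {
	int (*read)(void *ctx, unsigned int *msg, unsigned short size);
	int (*write)(void *ctx, unsigned int *msg, unsigned short size);
};

struct mbx_info {
	const struct mbx_ops *ops;	/* NULL until the PF/VF code assigns it */
};

static int mbx_read(struct mbx_info *mbx, void *ctx,
		    unsigned int *msg, unsigned short size)
{
	if (!mbx->ops)		/* covers read, write, and every other callback */
		return -1;

	return mbx->ops->read(ctx, msg, size);
}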
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index 8daa95f74548..01c2667c0f92 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 Copyright(c) 1999 - 2016 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -123,6 +123,6 @@ s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
123void ixgbe_init_mbx_params_pf(struct ixgbe_hw *); 123void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
124#endif /* CONFIG_PCI_IOV */ 124#endif /* CONFIG_PCI_IOV */
125 125
126extern struct ixgbe_mbx_operations mbx_ops_generic; 126extern const struct ixgbe_mbx_operations mbx_ops_generic;
127 127
128#endif /* _IXGBE_MBX_H_ */ 128#endif /* _IXGBE_MBX_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
index ce48872d4782..60adde55a8c3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
@@ -32,7 +32,6 @@
32 32
33struct ixgbe_mat_field { 33struct ixgbe_mat_field {
34 unsigned int off; 34 unsigned int off;
35 unsigned int mask;
36 int (*val)(struct ixgbe_fdir_filter *input, 35 int (*val)(struct ixgbe_fdir_filter *input,
37 union ixgbe_atr_input *mask, 36 union ixgbe_atr_input *mask,
38 u32 val, u32 m); 37 u32 val, u32 m);
@@ -58,39 +57,37 @@ static inline int ixgbe_mat_prgm_dip(struct ixgbe_fdir_filter *input,
58} 57}
59 58
60static struct ixgbe_mat_field ixgbe_ipv4_fields[] = { 59static struct ixgbe_mat_field ixgbe_ipv4_fields[] = {
61 { .off = 12, .mask = -1, .val = ixgbe_mat_prgm_sip, 60 { .off = 12, .val = ixgbe_mat_prgm_sip,
62 .type = IXGBE_ATR_FLOW_TYPE_IPV4}, 61 .type = IXGBE_ATR_FLOW_TYPE_IPV4},
63 { .off = 16, .mask = -1, .val = ixgbe_mat_prgm_dip, 62 { .off = 16, .val = ixgbe_mat_prgm_dip,
64 .type = IXGBE_ATR_FLOW_TYPE_IPV4}, 63 .type = IXGBE_ATR_FLOW_TYPE_IPV4},
65 { .val = NULL } /* terminal node */ 64 { .val = NULL } /* terminal node */
66}; 65};
67 66
68static inline int ixgbe_mat_prgm_sport(struct ixgbe_fdir_filter *input, 67static inline int ixgbe_mat_prgm_ports(struct ixgbe_fdir_filter *input,
69 union ixgbe_atr_input *mask, 68 union ixgbe_atr_input *mask,
70 u32 val, u32 m) 69 u32 val, u32 m)
71{ 70{
72 input->filter.formatted.src_port = val & 0xffff; 71 input->filter.formatted.src_port = val & 0xffff;
73 mask->formatted.src_port = m & 0xffff; 72 mask->formatted.src_port = m & 0xffff;
74 return 0; 73 input->filter.formatted.dst_port = val >> 16;
75}; 74 mask->formatted.dst_port = m >> 16;
76 75
77static inline int ixgbe_mat_prgm_dport(struct ixgbe_fdir_filter *input,
78 union ixgbe_atr_input *mask,
79 u32 val, u32 m)
80{
81 input->filter.formatted.dst_port = val & 0xffff;
82 mask->formatted.dst_port = m & 0xffff;
83 return 0; 76 return 0;
84}; 77};
85 78
86static struct ixgbe_mat_field ixgbe_tcp_fields[] = { 79static struct ixgbe_mat_field ixgbe_tcp_fields[] = {
87 {.off = 0, .mask = 0xffff, .val = ixgbe_mat_prgm_sport, 80 {.off = 0, .val = ixgbe_mat_prgm_ports,
88 .type = IXGBE_ATR_FLOW_TYPE_TCPV4},
89 {.off = 2, .mask = 0xffff, .val = ixgbe_mat_prgm_dport,
90 .type = IXGBE_ATR_FLOW_TYPE_TCPV4}, 81 .type = IXGBE_ATR_FLOW_TYPE_TCPV4},
91 { .val = NULL } /* terminal node */ 82 { .val = NULL } /* terminal node */
92}; 83};
93 84
85static struct ixgbe_mat_field ixgbe_udp_fields[] = {
86 {.off = 0, .val = ixgbe_mat_prgm_ports,
87 .type = IXGBE_ATR_FLOW_TYPE_UDPV4},
88 { .val = NULL } /* terminal node */
89};
90
94struct ixgbe_nexthdr { 91struct ixgbe_nexthdr {
95 /* offset, shift, and mask of position to next header */ 92 /* offset, shift, and mask of position to next header */
96 unsigned int o; 93 unsigned int o;
@@ -107,6 +104,8 @@ struct ixgbe_nexthdr {
107static struct ixgbe_nexthdr ixgbe_ipv4_jumps[] = { 104static struct ixgbe_nexthdr ixgbe_ipv4_jumps[] = {
108 { .o = 0, .s = 6, .m = 0xf, 105 { .o = 0, .s = 6, .m = 0xf,
109 .off = 8, .val = 0x600, .mask = 0xff00, .jump = ixgbe_tcp_fields}, 106 .off = 8, .val = 0x600, .mask = 0xff00, .jump = ixgbe_tcp_fields},
107 { .o = 0, .s = 6, .m = 0xf,
108 .off = 8, .val = 0x1100, .mask = 0xff00, .jump = ixgbe_udp_fields},
110 { .jump = NULL } /* terminal node */ 109 { .jump = NULL } /* terminal node */
111}; 110};
112#endif /* _IXGBE_MODEL_H_ */ 111#endif /* _IXGBE_MODEL_H_ */
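With ixgbe_mat_prgm_sport/_dport merged into ixgbe_mat_prgm_ports above, a single 32-bit u32 key at offset 0 of the L4 header programs both ports, and the new UDP table reuses the same helper. As an illustration of splitting such a key (names are invented for the sketch, and which half ends up as source vs. destination depends on how the key is byte-ordered; the split itself is the point):

/* Illustrative sketch: one 32-bit match value/mask carries two 16-bit
 * port fields; split it into its two halves.
 */
#include <stdint.h>

struct l4_ports {
	uint16_t src;
	uint16_t dst;
};

static void split_port_key(uint32_t val, uint32_t mask,
			   struct l4_ports *match, struct l4_ports *msk)
{
	match->src = val & 0xffff;	/* low 16 bits of the key  */
	msk->src   = mask & 0xffff;
	match->dst = val >> 16;		/* high 16 bits of the key */
	msk->dst   = mask >> 16;
}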
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 5abd66c84d00..cdf4c3800801 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2014 Intel Corporation. 4 Copyright(c) 1999 - 2016 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -81,7 +81,11 @@
81#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2 81#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
82#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 82#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
83#define IXGBE_CS4227 0xBE /* CS4227 address */ 83#define IXGBE_CS4227 0xBE /* CS4227 address */
84#define IXGBE_CS4227_GLOBAL_ID_LSB 0
85#define IXGBE_CS4227_GLOBAL_ID_MSB 1
84#define IXGBE_CS4227_SCRATCH 2 86#define IXGBE_CS4227_SCRATCH 2
87#define IXGBE_CS4223_PHY_ID 0x7003 /* Quad port */
88#define IXGBE_CS4227_PHY_ID 0x3003 /* Dual port */
85#define IXGBE_CS4227_RESET_PENDING 0x1357 89#define IXGBE_CS4227_RESET_PENDING 0x1357
86#define IXGBE_CS4227_RESET_COMPLETE 0x5AA5 90#define IXGBE_CS4227_RESET_COMPLETE 0x5AA5
87#define IXGBE_CS4227_RETRIES 15 91#define IXGBE_CS4227_RETRIES 15
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index ef1504d41890..bdc8fdcc07a5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2015 Intel Corporation. 4 Copyright(c) 1999 - 2016 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -333,6 +333,7 @@ static void ixgbe_ptp_convert_to_hwtstamp(struct ixgbe_adapter *adapter,
333 */ 333 */
334 case ixgbe_mac_X550: 334 case ixgbe_mac_X550:
335 case ixgbe_mac_X550EM_x: 335 case ixgbe_mac_X550EM_x:
336 case ixgbe_mac_x550em_a:
336 /* Upper 32 bits represent billions of cycles, lower 32 bits 337 /* Upper 32 bits represent billions of cycles, lower 32 bits
337 * represent cycles. However, we use timespec64_to_ns for the 338 * represent cycles. However, we use timespec64_to_ns for the
338 * correct math even though the units haven't been corrected 339 * correct math even though the units haven't been corrected
@@ -921,6 +922,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
921 switch (hw->mac.type) { 922 switch (hw->mac.type) {
922 case ixgbe_mac_X550: 923 case ixgbe_mac_X550:
923 case ixgbe_mac_X550EM_x: 924 case ixgbe_mac_X550EM_x:
925 case ixgbe_mac_x550em_a:
924 /* enable timestamping all packets only if at least some 926 /* enable timestamping all packets only if at least some
925 * packets were requested. Otherwise, play nice and disable 927 * packets were requested. Otherwise, play nice and disable
926 * timestamping 928 * timestamping
@@ -1083,6 +1085,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
1083 cc.shift = 2; 1085 cc.shift = 2;
1084 } 1086 }
1085 /* fallthrough */ 1087 /* fallthrough */
1088 case ixgbe_mac_x550em_a:
1086 case ixgbe_mac_X550: 1089 case ixgbe_mac_X550:
1087 cc.read = ixgbe_ptp_read_X550; 1090 cc.read = ixgbe_ptp_read_X550;
1088 1091
@@ -1223,6 +1226,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
1223 break; 1226 break;
1224 case ixgbe_mac_X550: 1227 case ixgbe_mac_X550:
1225 case ixgbe_mac_X550EM_x: 1228 case ixgbe_mac_X550EM_x:
1229 case ixgbe_mac_x550em_a:
1226 snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name); 1230 snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name);
1227 adapter->ptp_caps.owner = THIS_MODULE; 1231 adapter->ptp_caps.owner = THIS_MODULE;
1228 adapter->ptp_caps.max_adj = 30000000; 1232 adapter->ptp_caps.max_adj = 30000000;
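The X550-family cases added above rely on the comment in ixgbe_ptp_convert_to_hwtstamp: the raw 64-bit timestamp keeps "billions of cycles" in the upper 32 bits and cycles in the lower 32, and the driver feeds the two halves through timespec64_to_ns(). A standalone sketch of the same arithmetic (not the driver code):

/* Illustrative sketch: upper 32 bits scale by 1e9, lower 32 bits add
 * directly -- the same combination timespec64_to_ns() performs.
 */
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

static uint64_t x550_stamp_to_ns(uint64_t raw)
{
	uint64_t upper = raw >> 32;		/* "billions of cycles" */
	uint64_t lower = raw & 0xffffffffULL;	/* cycles */

	return upper * NSEC_PER_SEC + lower;
}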
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 8025a3f93598..adcf00002483 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -589,40 +589,40 @@ static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf)
589static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf) 589static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf)
590{ 590{
591 struct ixgbe_hw *hw = &adapter->hw; 591 struct ixgbe_hw *hw = &adapter->hw;
592 u32 i; 592 u32 vlvfb_mask, pool_mask, i;
593
594 /* create mask for VF and other pools */
595 pool_mask = ~(1 << (VMDQ_P(0) % 32));
596 vlvfb_mask = 1 << (vf % 32);
593 597
594 /* post increment loop, covers VLVF_ENTRIES - 1 to 0 */ 598 /* post increment loop, covers VLVF_ENTRIES - 1 to 0 */
595 for (i = IXGBE_VLVF_ENTRIES; i--;) { 599 for (i = IXGBE_VLVF_ENTRIES; i--;) {
596 u32 bits[2], vlvfb, vid, vfta, vlvf; 600 u32 bits[2], vlvfb, vid, vfta, vlvf;
597 u32 word = i * 2 + vf / 32; 601 u32 word = i * 2 + vf / 32;
598 u32 mask = 1 << (vf % 32); 602 u32 mask;
599 603
600 vlvfb = IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); 604 vlvfb = IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
601 605
602 /* if our bit isn't set we can skip it */ 606 /* if our bit isn't set we can skip it */
603 if (!(vlvfb & mask)) 607 if (!(vlvfb & vlvfb_mask))
604 continue; 608 continue;
605 609
606 /* clear our bit from vlvfb */ 610 /* clear our bit from vlvfb */
607 vlvfb ^= mask; 611 vlvfb ^= vlvfb_mask;
608 612
609 /* create 64b mask to check whether we should clear VLVF */ 613 /* create 64b mask to check whether we should clear VLVF */
610 bits[word % 2] = vlvfb; 614 bits[word % 2] = vlvfb;
611 bits[~word % 2] = IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1)); 615 bits[~word % 2] = IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1));
612 616
613 /* if promisc is enabled, PF will be present, leave VFTA */
614 if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC) {
615 bits[VMDQ_P(0) / 32] &= ~(1 << (VMDQ_P(0) % 32));
616
617 if (bits[0] || bits[1])
618 goto update_vlvfb;
619 goto update_vlvf;
620 }
621
622 /* if other pools are present, just remove ourselves */ 617 /* if other pools are present, just remove ourselves */
623 if (bits[0] || bits[1]) 618 if (bits[(VMDQ_P(0) / 32) ^ 1] ||
619 (bits[VMDQ_P(0) / 32] & pool_mask))
624 goto update_vlvfb; 620 goto update_vlvfb;
625 621
622 /* if PF is present, leave VFTA */
623 if (bits[0] || bits[1])
624 goto update_vlvf;
625
626 /* if we cannot determine VLAN just remove ourselves */ 626 /* if we cannot determine VLAN just remove ourselves */
627 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i)); 627 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i));
628 if (!vlvf) 628 if (!vlvf)
@@ -638,6 +638,9 @@ static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf)
638update_vlvf: 638update_vlvf:
639 /* clear POOL selection enable */ 639 /* clear POOL selection enable */
640 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), 0); 640 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), 0);
641
642 if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
643 vlvfb = 0;
641update_vlvfb: 644update_vlvfb:
642 /* clear pool bits */ 645 /* clear pool bits */
643 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), vlvfb); 646 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), vlvfb);
@@ -887,7 +890,7 @@ static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
887 return -1; 890 return -1;
888 } 891 }
889 892
890 if (adapter->vfinfo[vf].pf_set_mac && 893 if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted &&
891 !ether_addr_equal(adapter->vfinfo[vf].vf_mac_addresses, new_mac)) { 894 !ether_addr_equal(adapter->vfinfo[vf].vf_mac_addresses, new_mac)) {
892 e_warn(drv, 895 e_warn(drv,
893 "VF %d attempted to override administratively set MAC address\n" 896 "VF %d attempted to override administratively set MAC address\n"
@@ -1395,7 +1398,7 @@ out:
1395 return err; 1398 return err;
1396} 1399}
1397 1400
1398static int ixgbe_link_mbps(struct ixgbe_adapter *adapter) 1401int ixgbe_link_mbps(struct ixgbe_adapter *adapter)
1399{ 1402{
1400 switch (adapter->link_speed) { 1403 switch (adapter->link_speed) {
1401 case IXGBE_LINK_SPEED_100_FULL: 1404 case IXGBE_LINK_SPEED_100_FULL:
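The ixgbe_clear_vf_vlans() rework above precomputes vlvfb_mask (this VF's bit) and pool_mask (everything except the PF pool) and then inspects the two 32-bit VLVFB words as one 64-bit pool bitmap to decide whether other pools still reference a VLAN filter entry. A simplified sketch of that test, with stand-in names (pf_pool plays the role of VMDQ_P(0); both indices are assumed to be 0..63):

/* Illustrative sketch: after dropping our own bit and the PF pool bit,
 * any remaining bit means another pool still uses the entry.
 */
#include <stdint.h>
#include <stdbool.h>

static bool other_pools_active(uint32_t bits[2], unsigned int vf,
			       unsigned int pf_pool)
{
	bits[vf / 32] &= ~(1u << (vf % 32));		/* drop our own bit   */
	bits[pf_pool / 32] &= ~(1u << (pf_pool % 32));	/* ignore the PF pool */

	return bits[0] || bits[1];
}

Whether the VFTA bit and the VLVF entry are also cleared then depends on VLAN-promiscuous mode, which is what the reordered update_vlvf/update_vlvfb paths in the hunk handle.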
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index dad925706f4c..47e65e2f886a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -44,6 +44,7 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
44int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac); 44int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
45int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan, 45int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
46 u8 qos); 46 u8 qos);
47int ixgbe_link_mbps(struct ixgbe_adapter *adapter);
47int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, 48int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
48 int max_tx_rate); 49 int max_tx_rate);
49int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); 50int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index bf7367a08716..ba3b837c7e9d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2015 Intel Corporation. 4 Copyright(c) 1999 - 2016 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -75,21 +75,29 @@
75#define IXGBE_DEV_ID_X540T1 0x1560 75#define IXGBE_DEV_ID_X540T1 0x1560
76 76
77#define IXGBE_DEV_ID_X550T 0x1563 77#define IXGBE_DEV_ID_X550T 0x1563
78#define IXGBE_DEV_ID_X550T1 0x15D1
78#define IXGBE_DEV_ID_X550EM_X_KX4 0x15AA 79#define IXGBE_DEV_ID_X550EM_X_KX4 0x15AA
79#define IXGBE_DEV_ID_X550EM_X_KR 0x15AB 80#define IXGBE_DEV_ID_X550EM_X_KR 0x15AB
80#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC 81#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC
81#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD 82#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD
82#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE 83#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE
84#define IXGBE_DEV_ID_X550EM_A_KR 0x15C2
85#define IXGBE_DEV_ID_X550EM_A_KR_L 0x15C3
86#define IXGBE_DEV_ID_X550EM_A_SFP_N 0x15C4
87#define IXGBE_DEV_ID_X550EM_A_SGMII 0x15C6
88#define IXGBE_DEV_ID_X550EM_A_SGMII_L 0x15C7
89#define IXGBE_DEV_ID_X550EM_A_SFP 0x15CE
90
91/* VF Device IDs */
83#define IXGBE_DEV_ID_X550_VF_HV 0x1564 92#define IXGBE_DEV_ID_X550_VF_HV 0x1564
84#define IXGBE_DEV_ID_X550_VF 0x1565 93#define IXGBE_DEV_ID_X550_VF 0x1565
85#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 94#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
86#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9 95#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9
87
88/* VF Device IDs */
89#define IXGBE_DEV_ID_82599_VF 0x10ED 96#define IXGBE_DEV_ID_82599_VF 0x10ED
90#define IXGBE_DEV_ID_X540_VF 0x1515 97#define IXGBE_DEV_ID_X540_VF 0x1515
91#define IXGBE_DEV_ID_X550_VF 0x1565 98#define IXGBE_DEV_ID_X550_VF 0x1565
92#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 99#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
100#define IXGBE_DEV_ID_X550EM_A_VF 0x15C5
93 101
94#define IXGBE_CAT(r, m) IXGBE_##r##_##m 102#define IXGBE_CAT(r, m) IXGBE_##r##_##m
95 103
@@ -128,7 +136,7 @@
128#define IXGBE_FLA_X540 IXGBE_FLA_8259X 136#define IXGBE_FLA_X540 IXGBE_FLA_8259X
129#define IXGBE_FLA_X550 IXGBE_FLA_8259X 137#define IXGBE_FLA_X550 IXGBE_FLA_8259X
130#define IXGBE_FLA_X550EM_x IXGBE_FLA_8259X 138#define IXGBE_FLA_X550EM_x IXGBE_FLA_8259X
131#define IXGBE_FLA_X550EM_a 0x15F6C 139#define IXGBE_FLA_X550EM_a 0x15F68
132#define IXGBE_FLA(_hw) IXGBE_BY_MAC((_hw), FLA) 140#define IXGBE_FLA(_hw) IXGBE_BY_MAC((_hw), FLA)
133#define IXGBE_EEMNGCTL 0x10110 141#define IXGBE_EEMNGCTL 0x10110
134#define IXGBE_EEMNGDATA 0x10114 142#define IXGBE_EEMNGDATA 0x10114
@@ -143,13 +151,6 @@
143#define IXGBE_GRC_X550EM_a 0x15F64 151#define IXGBE_GRC_X550EM_a 0x15F64
144#define IXGBE_GRC(_hw) IXGBE_BY_MAC((_hw), GRC) 152#define IXGBE_GRC(_hw) IXGBE_BY_MAC((_hw), GRC)
145 153
146#define IXGBE_SRAMREL_8259X 0x10210
147#define IXGBE_SRAMREL_X540 IXGBE_SRAMREL_8259X
148#define IXGBE_SRAMREL_X550 IXGBE_SRAMREL_8259X
149#define IXGBE_SRAMREL_X550EM_x IXGBE_SRAMREL_8259X
150#define IXGBE_SRAMREL_X550EM_a 0x15F6C
151#define IXGBE_SRAMREL(_hw) IXGBE_BY_MAC((_hw), SRAMREL)
152
153/* General Receive Control */ 154/* General Receive Control */
154#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */ 155#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */
155#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */ 156#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */
@@ -375,6 +376,8 @@ struct ixgbe_thermal_sensor_data {
375#define IXGBE_MRCTL(_i) (0x0F600 + ((_i) * 4)) 376#define IXGBE_MRCTL(_i) (0x0F600 + ((_i) * 4))
376#define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4)) 377#define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4))
377#define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4)) 378#define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4))
379#define IXGBE_WQBR_RX(_i) (0x2FB0 + ((_i) * 4)) /* 4 total */
380#define IXGBE_WQBR_TX(_i) (0x8130 + ((_i) * 4)) /* 4 total */
378#define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/ 381#define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/
379#define IXGBE_RXFECCERR0 0x051B8 382#define IXGBE_RXFECCERR0 0x051B8
380#define IXGBE_LLITHRESH 0x0EC90 383#define IXGBE_LLITHRESH 0x0EC90
@@ -446,6 +449,8 @@ struct ixgbe_thermal_sensor_data {
446#define IXGBE_DMATXCTL_TE 0x1 /* Transmit Enable */ 449#define IXGBE_DMATXCTL_TE 0x1 /* Transmit Enable */
447#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */ 450#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */
448#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */ 451#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */
452#define IXGBE_DMATXCTL_MDP_EN 0x20 /* Bit 5 */
453#define IXGBE_DMATXCTL_MBINTEN 0x40 /* Bit 6 */
449#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */ 454#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */
450 455
451#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */ 456#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */
@@ -554,7 +559,6 @@ struct ixgbe_thermal_sensor_data {
554#define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ 559#define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
555#define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ 560#define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
556 561
557
558/* Security Control Registers */ 562/* Security Control Registers */
559#define IXGBE_SECTXCTRL 0x08800 563#define IXGBE_SECTXCTRL 0x08800
560#define IXGBE_SECTXSTAT 0x08804 564#define IXGBE_SECTXSTAT 0x08804
@@ -1203,6 +1207,8 @@ struct ixgbe_thermal_sensor_data {
1203#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI */ 1207#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI */
1204#define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC enabled */ 1208#define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC enabled */
1205#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC enabled */ 1209#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC enabled */
1210#define IXGBE_RDRXCTL_MBINTEN 0x10000000
1211#define IXGBE_RDRXCTL_MDP_EN 0x20000000
1206 1212
1207/* RQTC Bit Masks and Shifts */ 1213/* RQTC Bit Masks and Shifts */
1208#define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4) 1214#define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4)
@@ -1309,6 +1315,7 @@ struct ixgbe_thermal_sensor_data {
1309 1315
1310/* MDIO definitions */ 1316/* MDIO definitions */
1311 1317
1318#define IXGBE_MDIO_ZERO_DEV_TYPE 0x0
1312#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1 1319#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1
1313#define IXGBE_MDIO_PCS_DEV_TYPE 0x3 1320#define IXGBE_MDIO_PCS_DEV_TYPE 0x3
1314#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4 1321#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4
@@ -1957,7 +1964,9 @@ enum {
1957#define IXGBE_GSSR_PHY1_SM 0x0004 1964#define IXGBE_GSSR_PHY1_SM 0x0004
1958#define IXGBE_GSSR_MAC_CSR_SM 0x0008 1965#define IXGBE_GSSR_MAC_CSR_SM 0x0008
1959#define IXGBE_GSSR_FLASH_SM 0x0010 1966#define IXGBE_GSSR_FLASH_SM 0x0010
1967#define IXGBE_GSSR_NVM_UPDATE_SM 0x0200
1960#define IXGBE_GSSR_SW_MNG_SM 0x0400 1968#define IXGBE_GSSR_SW_MNG_SM 0x0400
1969#define IXGBE_GSSR_TOKEN_SM 0x40000000 /* SW bit for shared access */
1961#define IXGBE_GSSR_SHARED_I2C_SM 0x1806 /* Wait for both phys & I2Cs */ 1970#define IXGBE_GSSR_SHARED_I2C_SM 0x1806 /* Wait for both phys & I2Cs */
1962#define IXGBE_GSSR_I2C_MASK 0x1800 1971#define IXGBE_GSSR_I2C_MASK 0x1800
1963#define IXGBE_GSSR_NVM_PHY_MASK 0xF 1972#define IXGBE_GSSR_NVM_PHY_MASK 0xF
@@ -1997,6 +2006,9 @@ enum {
1997#define IXGBE_PBANUM_PTR_GUARD 0xFAFA 2006#define IXGBE_PBANUM_PTR_GUARD 0xFAFA
1998#define IXGBE_EEPROM_CHECKSUM 0x3F 2007#define IXGBE_EEPROM_CHECKSUM 0x3F
1999#define IXGBE_EEPROM_SUM 0xBABA 2008#define IXGBE_EEPROM_SUM 0xBABA
2009#define IXGBE_EEPROM_CTRL_4 0x45
2010#define IXGBE_EE_CTRL_4_INST_ID 0x10
2011#define IXGBE_EE_CTRL_4_INST_ID_SHIFT 4
2000#define IXGBE_PCIE_ANALOG_PTR 0x03 2012#define IXGBE_PCIE_ANALOG_PTR 0x03
2001#define IXGBE_ATLAS0_CONFIG_PTR 0x04 2013#define IXGBE_ATLAS0_CONFIG_PTR 0x04
2002#define IXGBE_PHY_PTR 0x04 2014#define IXGBE_PHY_PTR 0x04
@@ -2530,6 +2542,10 @@ enum ixgbe_fdir_pballoc_type {
2530#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS 0x00000080 2542#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS 0x00000080
2531#define IXGBE_FDIRCTRL_DROP_Q_SHIFT 8 2543#define IXGBE_FDIRCTRL_DROP_Q_SHIFT 8
2532#define IXGBE_FDIRCTRL_FLEX_SHIFT 16 2544#define IXGBE_FDIRCTRL_FLEX_SHIFT 16
2545#define IXGBE_FDIRCTRL_DROP_NO_MATCH 0x00008000
2546#define IXGBE_FDIRCTRL_FILTERMODE_SHIFT 21
2547#define IXGBE_FDIRCTRL_FILTERMODE_MACVLAN 0x0001 /* bit 23:21, 001b */
2548#define IXGBE_FDIRCTRL_FILTERMODE_CLOUD 0x0002 /* bit 23:21, 010b */
2533#define IXGBE_FDIRCTRL_SEARCHLIM 0x00800000 2549#define IXGBE_FDIRCTRL_SEARCHLIM 0x00800000
2534#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT 24 2550#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT 24
2535#define IXGBE_FDIRCTRL_FULL_THRESH_MASK 0xF0000000 2551#define IXGBE_FDIRCTRL_FULL_THRESH_MASK 0xF0000000
@@ -2620,6 +2636,20 @@ enum ixgbe_fdir_pballoc_type {
2620#define FW_MAX_READ_BUFFER_SIZE 1024 2636#define FW_MAX_READ_BUFFER_SIZE 1024
2621#define FW_DISABLE_RXEN_CMD 0xDE 2637#define FW_DISABLE_RXEN_CMD 0xDE
2622#define FW_DISABLE_RXEN_LEN 0x1 2638#define FW_DISABLE_RXEN_LEN 0x1
2639#define FW_PHY_MGMT_REQ_CMD 0x20
2640#define FW_PHY_TOKEN_REQ_CMD 0x0A
2641#define FW_PHY_TOKEN_REQ_LEN 2
2642#define FW_PHY_TOKEN_REQ 0
2643#define FW_PHY_TOKEN_REL 1
2644#define FW_PHY_TOKEN_OK 1
2645#define FW_PHY_TOKEN_RETRY 0x80
2646#define FW_PHY_TOKEN_DELAY 5 /* milliseconds */
2647#define FW_PHY_TOKEN_WAIT 5 /* seconds */
2648#define FW_PHY_TOKEN_RETRIES ((FW_PHY_TOKEN_WAIT * 1000) / FW_PHY_TOKEN_DELAY)
2649#define FW_INT_PHY_REQ_CMD 0xB
2650#define FW_INT_PHY_REQ_LEN 10
2651#define FW_INT_PHY_REQ_READ 0
2652#define FW_INT_PHY_REQ_WRITE 1
2623 2653
2624/* Host Interface Command Structures */ 2654/* Host Interface Command Structures */
2625struct ixgbe_hic_hdr { 2655struct ixgbe_hic_hdr {
@@ -2688,6 +2718,28 @@ struct ixgbe_hic_disable_rxen {
2688 u16 pad3; 2718 u16 pad3;
2689}; 2719};
2690 2720
2721struct ixgbe_hic_phy_token_req {
2722 struct ixgbe_hic_hdr hdr;
2723 u8 port_number;
2724 u8 command_type;
2725 u16 pad;
2726};
2727
2728struct ixgbe_hic_internal_phy_req {
2729 struct ixgbe_hic_hdr hdr;
2730 u8 port_number;
2731 u8 command_type;
2732 __be16 address;
2733 u16 rsv1;
2734 __be32 write_data;
2735 u16 pad;
2736} __packed;
2737
2738struct ixgbe_hic_internal_phy_resp {
2739 struct ixgbe_hic_hdr hdr;
2740 __be32 read_data;
2741};
2742
2691/* Transmit Descriptor - Advanced */ 2743/* Transmit Descriptor - Advanced */
2692union ixgbe_adv_tx_desc { 2744union ixgbe_adv_tx_desc {
2693 struct { 2745 struct {
@@ -2948,7 +3000,6 @@ union ixgbe_atr_hash_dword {
2948 IXGBE_CAT(EEC, m), \ 3000 IXGBE_CAT(EEC, m), \
2949 IXGBE_CAT(FLA, m), \ 3001 IXGBE_CAT(FLA, m), \
2950 IXGBE_CAT(GRC, m), \ 3002 IXGBE_CAT(GRC, m), \
2951 IXGBE_CAT(SRAMREL, m), \
2952 IXGBE_CAT(FACTPS, m), \ 3003 IXGBE_CAT(FACTPS, m), \
2953 IXGBE_CAT(SWSM, m), \ 3004 IXGBE_CAT(SWSM, m), \
2954 IXGBE_CAT(SWFW_SYNC, m), \ 3005 IXGBE_CAT(SWFW_SYNC, m), \
@@ -2989,6 +3040,7 @@ enum ixgbe_mac_type {
2989 ixgbe_mac_X540, 3040 ixgbe_mac_X540,
2990 ixgbe_mac_X550, 3041 ixgbe_mac_X550,
2991 ixgbe_mac_X550EM_x, 3042 ixgbe_mac_X550EM_x,
3043 ixgbe_mac_x550em_a,
2992 ixgbe_num_macs 3044 ixgbe_num_macs
2993}; 3045};
2994 3046
@@ -3017,6 +3069,7 @@ enum ixgbe_phy_type {
3017 ixgbe_phy_qsfp_intel, 3069 ixgbe_phy_qsfp_intel,
3018 ixgbe_phy_qsfp_unknown, 3070 ixgbe_phy_qsfp_unknown,
3019 ixgbe_phy_sfp_unsupported, 3071 ixgbe_phy_sfp_unsupported,
3072 ixgbe_phy_sgmii,
3020 ixgbe_phy_generic 3073 ixgbe_phy_generic
3021}; 3074};
3022 3075
@@ -3130,8 +3183,9 @@ struct ixgbe_bus_info {
3130 enum ixgbe_bus_width width; 3183 enum ixgbe_bus_width width;
3131 enum ixgbe_bus_type type; 3184 enum ixgbe_bus_type type;
3132 3185
3133 u16 func; 3186 u8 func;
3134 u16 lan_id; 3187 u8 lan_id;
3188 u8 instance_id;
3135}; 3189};
3136 3190
3137/* Flow control parameters */ 3191/* Flow control parameters */
@@ -3266,6 +3320,7 @@ struct ixgbe_mac_operations {
3266 s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); 3320 s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
3267 s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u32); 3321 s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u32);
3268 void (*release_swfw_sync)(struct ixgbe_hw *, u32); 3322 void (*release_swfw_sync)(struct ixgbe_hw *, u32);
3323 void (*init_swfw_sync)(struct ixgbe_hw *);
3269 s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *); 3324 s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *);
3270 s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool); 3325 s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool);
3271 3326
@@ -3308,6 +3363,7 @@ struct ixgbe_mac_operations {
3308 3363
3309 /* Flow Control */ 3364 /* Flow Control */
3310 s32 (*fc_enable)(struct ixgbe_hw *); 3365 s32 (*fc_enable)(struct ixgbe_hw *);
3366 s32 (*setup_fc)(struct ixgbe_hw *);
3311 3367
3312 /* Manageability interface */ 3368 /* Manageability interface */
3313 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8); 3369 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
@@ -3323,6 +3379,8 @@ struct ixgbe_mac_operations {
3323 s32 (*dmac_config)(struct ixgbe_hw *hw); 3379 s32 (*dmac_config)(struct ixgbe_hw *hw);
3324 s32 (*dmac_update_tcs)(struct ixgbe_hw *hw); 3380 s32 (*dmac_update_tcs)(struct ixgbe_hw *hw);
3325 s32 (*dmac_config_tcs)(struct ixgbe_hw *hw); 3381 s32 (*dmac_config_tcs)(struct ixgbe_hw *hw);
3382 s32 (*read_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32 *);
3383 s32 (*write_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32);
3326}; 3384};
3327 3385
3328struct ixgbe_phy_operations { 3386struct ixgbe_phy_operations {
@@ -3442,7 +3500,7 @@ struct ixgbe_mbx_stats {
3442}; 3500};
3443 3501
3444struct ixgbe_mbx_info { 3502struct ixgbe_mbx_info {
3445 struct ixgbe_mbx_operations ops; 3503 const struct ixgbe_mbx_operations *ops;
3446 struct ixgbe_mbx_stats stats; 3504 struct ixgbe_mbx_stats stats;
3447 u32 timeout; 3505 u32 timeout;
3448 u32 usec_delay; 3506 u32 usec_delay;
@@ -3475,10 +3533,10 @@ struct ixgbe_hw {
3475struct ixgbe_info { 3533struct ixgbe_info {
3476 enum ixgbe_mac_type mac; 3534 enum ixgbe_mac_type mac;
3477 s32 (*get_invariants)(struct ixgbe_hw *); 3535 s32 (*get_invariants)(struct ixgbe_hw *);
3478 struct ixgbe_mac_operations *mac_ops; 3536 const struct ixgbe_mac_operations *mac_ops;
3479 struct ixgbe_eeprom_operations *eeprom_ops; 3537 const struct ixgbe_eeprom_operations *eeprom_ops;
3480 struct ixgbe_phy_operations *phy_ops; 3538 const struct ixgbe_phy_operations *phy_ops;
3481 struct ixgbe_mbx_operations *mbx_ops; 3539 const struct ixgbe_mbx_operations *mbx_ops;
3482 const u32 *mvals; 3540 const u32 *mvals;
3483}; 3541};
3484 3542
@@ -3517,6 +3575,8 @@ struct ixgbe_info {
3517#define IXGBE_ERR_INVALID_ARGUMENT -32 3575#define IXGBE_ERR_INVALID_ARGUMENT -32
3518#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33 3576#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33
3519#define IXGBE_ERR_FDIR_CMD_INCOMPLETE -38 3577#define IXGBE_ERR_FDIR_CMD_INCOMPLETE -38
3578#define IXGBE_ERR_FW_RESP_INVALID -39
3579#define IXGBE_ERR_TOKEN_RETRY -40
3520#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF 3580#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
3521 3581
3522#define IXGBE_FUSES0_GROUP(_i) (0x11158 + ((_i) * 4)) 3582#define IXGBE_FUSES0_GROUP(_i) (0x11158 + ((_i) * 4))
@@ -3525,6 +3585,9 @@ struct ixgbe_info {
3525 3585
3526#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010) 3586#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010)
3527#define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C) 3587#define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C)
3588#define IXGBE_KRM_AN_CNTL_1(P) ((P) ? 0x822C : 0x422C)
3589#define IXGBE_KRM_AN_CNTL_8(P) ((P) ? 0x8248 : 0x4248)
3590#define IXGBE_KRM_SGMII_CTRL(P) ((P) ? 0x82A0 : 0x42A0)
3528#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634) 3591#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634)
3529#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P) ? 0x8638 : 0x4638) 3592#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P) ? 0x8638 : 0x4638)
3530#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P) ? 0x8B00 : 0x4B00) 3593#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P) ? 0x8B00 : 0x4B00)
@@ -3538,6 +3601,8 @@ struct ixgbe_info {
3538#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (0x7 << 8) 3601#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (0x7 << 8)
3539#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2 << 8) 3602#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2 << 8)
3540#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4 << 8) 3603#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4 << 8)
3604#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN BIT(12)
3605#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN BIT(13)
3541#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ (1 << 14) 3606#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ (1 << 14)
3542#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC (1 << 15) 3607#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC (1 << 15)
3543#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX (1 << 16) 3608#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX (1 << 16)
@@ -3547,6 +3612,15 @@ struct ixgbe_info {
3547#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE (1 << 29) 3612#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE (1 << 29)
3548#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART (1 << 31) 3613#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART (1 << 31)
3549 3614
3615#define IXGBE_KRM_AN_CNTL_1_SYM_PAUSE (1 << 28)
3616#define IXGBE_KRM_AN_CNTL_1_ASM_PAUSE (1 << 29)
3617
3618#define IXGBE_KRM_AN_CNTL_8_LINEAR BIT(0)
3619#define IXGBE_KRM_AN_CNTL_8_LIMITING BIT(1)
3620
3621#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D BIT(12)
3622#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D BIT(19)
3623
3550#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN (1 << 6) 3624#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN (1 << 6)
3551#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN (1 << 15) 3625#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN (1 << 15)
3552#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN (1 << 16) 3626#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN (1 << 16)
@@ -3591,5 +3665,10 @@ struct ixgbe_info {
3591#define IXGBE_SB_IOSF_TARGET_KX4_PCS1 3 3665#define IXGBE_SB_IOSF_TARGET_KX4_PCS1 3
3592 3666
3593#define IXGBE_NW_MNG_IF_SEL 0x00011178 3667#define IXGBE_NW_MNG_IF_SEL 0x00011178
3668#define IXGBE_NW_MNG_IF_SEL_MDIO_ACT BIT(1)
3669#define IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M BIT(23)
3594#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE BIT(24) 3670#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE BIT(24)
3671#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT 3
3672#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD \
3673 (0x1F << IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT)
3595#endif /* _IXGBE_TYPE_H_ */ 3674#endif /* _IXGBE_TYPE_H_ */
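Two small points in the ixgbe_type.h additions above are worth spelling out. FW_PHY_TOKEN_RETRIES works out to (5 s * 1000) / 5 ms = 1000 polling attempts at 5 ms apiece, i.e. the advertised 5-second wait for the PHY token. And IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD is the usual mask-plus-shift pair for a 5-bit MDIO address field; extracting it looks like the sketch below (the register value and the local macro copies are stand-ins for the real definitions):

/* Illustrative sketch: pull the 5-bit MDIO PHY address out of the
 * NW_MNG_IF_SEL register value using the new mask/shift pair.
 */
#include <stdint.h>

#define NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT 3
#define NW_MNG_IF_SEL_MDIO_PHY_ADD \
	(0x1Fu << NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT)

static uint32_t mdio_phy_addr(uint32_t nw_mng_if_sel)
{
	return (nw_mng_if_sel & NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
	       NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;	/* address 0..31 */
}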
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 2358c1b7d586..40824d85d807 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2014 Intel Corporation. 4 Copyright(c) 1999 - 2016 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -747,6 +747,25 @@ static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
747} 747}
748 748
749/** 749/**
750 * ixgbe_init_swfw_sync_X540 - Reset SW/FW semaphore
751 * @hw: pointer to hardware structure
752 *
753 * This function resets hardware semaphore bits for a semaphore that may
754 * have been left locked due to a catastrophic failure.
755 **/
756void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw)
757{
758 	/* First try to grab the semaphore; there is no need to check
759 	 * whether we actually got the lock, because the same action is
760 	 * taken either way:
761 	 * - we got the lock: simply release it again.
762 	 * - we timed out waiting: the release forcibly clears the stale lock.
763 	 */
764 ixgbe_get_swfw_sync_semaphore(hw);
765 ixgbe_release_swfw_sync_semaphore(hw);
766}
767
768/**
750 * ixgbe_blink_led_start_X540 - Blink LED based on index. 769 * ixgbe_blink_led_start_X540 - Blink LED based on index.
751 * @hw: pointer to hardware structure 770 * @hw: pointer to hardware structure
752 * @index: led number to blink 771 * @index: led number to blink
@@ -810,7 +829,7 @@ s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
810 829
811 return 0; 830 return 0;
812} 831}
813static struct ixgbe_mac_operations mac_ops_X540 = { 832static const struct ixgbe_mac_operations mac_ops_X540 = {
814 .init_hw = &ixgbe_init_hw_generic, 833 .init_hw = &ixgbe_init_hw_generic,
815 .reset_hw = &ixgbe_reset_hw_X540, 834 .reset_hw = &ixgbe_reset_hw_X540,
816 .start_hw = &ixgbe_start_hw_X540, 835 .start_hw = &ixgbe_start_hw_X540,
@@ -846,6 +865,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
846 .clear_vfta = &ixgbe_clear_vfta_generic, 865 .clear_vfta = &ixgbe_clear_vfta_generic,
847 .set_vfta = &ixgbe_set_vfta_generic, 866 .set_vfta = &ixgbe_set_vfta_generic,
848 .fc_enable = &ixgbe_fc_enable_generic, 867 .fc_enable = &ixgbe_fc_enable_generic,
868 .setup_fc = ixgbe_setup_fc_generic,
849 .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, 869 .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic,
850 .init_uta_tables = &ixgbe_init_uta_tables_generic, 870 .init_uta_tables = &ixgbe_init_uta_tables_generic,
851 .setup_sfp = NULL, 871 .setup_sfp = NULL,
@@ -853,6 +873,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
853 .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, 873 .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
854 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, 874 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540,
855 .release_swfw_sync = &ixgbe_release_swfw_sync_X540, 875 .release_swfw_sync = &ixgbe_release_swfw_sync_X540,
876 .init_swfw_sync = &ixgbe_init_swfw_sync_X540,
856 .disable_rx_buff = &ixgbe_disable_rx_buff_generic, 877 .disable_rx_buff = &ixgbe_disable_rx_buff_generic,
857 .enable_rx_buff = &ixgbe_enable_rx_buff_generic, 878 .enable_rx_buff = &ixgbe_enable_rx_buff_generic,
858 .get_thermal_sensor_data = NULL, 879 .get_thermal_sensor_data = NULL,
@@ -863,7 +884,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
863 .disable_rx = &ixgbe_disable_rx_generic, 884 .disable_rx = &ixgbe_disable_rx_generic,
864}; 885};
865 886
866static struct ixgbe_eeprom_operations eeprom_ops_X540 = { 887static const struct ixgbe_eeprom_operations eeprom_ops_X540 = {
867 .init_params = &ixgbe_init_eeprom_params_X540, 888 .init_params = &ixgbe_init_eeprom_params_X540,
868 .read = &ixgbe_read_eerd_X540, 889 .read = &ixgbe_read_eerd_X540,
869 .read_buffer = &ixgbe_read_eerd_buffer_X540, 890 .read_buffer = &ixgbe_read_eerd_buffer_X540,
@@ -874,7 +895,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
874 .update_checksum = &ixgbe_update_eeprom_checksum_X540, 895 .update_checksum = &ixgbe_update_eeprom_checksum_X540,
875}; 896};
876 897
877static struct ixgbe_phy_operations phy_ops_X540 = { 898static const struct ixgbe_phy_operations phy_ops_X540 = {
878 .identify = &ixgbe_identify_phy_generic, 899 .identify = &ixgbe_identify_phy_generic,
879 .identify_sfp = &ixgbe_identify_sfp_module_generic, 900 .identify_sfp = &ixgbe_identify_sfp_module_generic,
880 .init = NULL, 901 .init = NULL,
@@ -897,7 +918,7 @@ static const u32 ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = {
897 IXGBE_MVALS_INIT(X540) 918 IXGBE_MVALS_INIT(X540)
898}; 919};
899 920
900struct ixgbe_info ixgbe_X540_info = { 921const struct ixgbe_info ixgbe_X540_info = {
901 .mac = ixgbe_mac_X540, 922 .mac = ixgbe_mac_X540,
902 .get_invariants = &ixgbe_get_invariants_X540, 923 .get_invariants = &ixgbe_get_invariants_X540,
903 .mac_ops = &mac_ops_X540, 924 .mac_ops = &mac_ops_X540,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
index a1468b1f4d8a..e21cd48491d3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
@@ -36,4 +36,5 @@ s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index);
36s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index); 36s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index);
37s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask); 37s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
38void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask); 38void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
39void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw);
39s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw); 40s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 87aca3f7c3de..c71e93ed4451 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel 10 Gigabit PCI Express Linux driver 3 * Intel 10 Gigabit PCI Express Linux driver
4 * Copyright(c) 1999 - 2015 Intel Corporation. 4 * Copyright(c) 1999 - 2016 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -27,6 +27,7 @@
27#include "ixgbe_phy.h" 27#include "ixgbe_phy.h"
28 28
29static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *, ixgbe_link_speed); 29static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *, ixgbe_link_speed);
30static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *);
30 31
31static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw) 32static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw)
32{ 33{
@@ -272,16 +273,26 @@ out:
272static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) 273static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
273{ 274{
274 switch (hw->device_id) { 275 switch (hw->device_id) {
276 case IXGBE_DEV_ID_X550EM_A_SFP:
277 if (hw->bus.lan_id)
278 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
279 else
280 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
281 return ixgbe_identify_module_generic(hw);
275 case IXGBE_DEV_ID_X550EM_X_SFP: 282 case IXGBE_DEV_ID_X550EM_X_SFP:
276 /* set up for CS4227 usage */ 283 /* set up for CS4227 usage */
277 hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; 284 hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
278 ixgbe_setup_mux_ctl(hw); 285 ixgbe_setup_mux_ctl(hw);
279 ixgbe_check_cs4227(hw); 286 ixgbe_check_cs4227(hw);
287 /* Fallthrough */
288 case IXGBE_DEV_ID_X550EM_A_SFP_N:
280 return ixgbe_identify_module_generic(hw); 289 return ixgbe_identify_module_generic(hw);
281 case IXGBE_DEV_ID_X550EM_X_KX4: 290 case IXGBE_DEV_ID_X550EM_X_KX4:
282 hw->phy.type = ixgbe_phy_x550em_kx4; 291 hw->phy.type = ixgbe_phy_x550em_kx4;
283 break; 292 break;
284 case IXGBE_DEV_ID_X550EM_X_KR: 293 case IXGBE_DEV_ID_X550EM_X_KR:
294 case IXGBE_DEV_ID_X550EM_A_KR:
295 case IXGBE_DEV_ID_X550EM_A_KR_L:
285 hw->phy.type = ixgbe_phy_x550em_kr; 296 hw->phy.type = ixgbe_phy_x550em_kr;
286 break; 297 break;
287 case IXGBE_DEV_ID_X550EM_X_1G_T: 298 case IXGBE_DEV_ID_X550EM_X_1G_T:
@@ -355,7 +366,7 @@ static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
355 command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL); 366 command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
356 if (!(command & IXGBE_SB_IOSF_CTRL_BUSY)) 367 if (!(command & IXGBE_SB_IOSF_CTRL_BUSY))
357 break; 368 break;
358 usleep_range(10, 20); 369 udelay(10);
359 } 370 }
360 if (ctrl) 371 if (ctrl)
361 *ctrl = command; 372 *ctrl = command;
@@ -412,6 +423,121 @@ out:
412 return ret; 423 return ret;
413} 424}
414 425
426/**
427 * ixgbe_get_phy_token - Get the token for shared PHY access
428 * @hw: Pointer to hardware structure
429 */
430static s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
431{
432 struct ixgbe_hic_phy_token_req token_cmd;
433 s32 status;
434
435 token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
436 token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
437 token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
438 token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
439 token_cmd.port_number = hw->bus.lan_id;
440 token_cmd.command_type = FW_PHY_TOKEN_REQ;
441 token_cmd.pad = 0;
442 status = ixgbe_host_interface_command(hw, &token_cmd, sizeof(token_cmd),
443 IXGBE_HI_COMMAND_TIMEOUT,
444 true);
445 if (status)
446 return status;
447 if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
448 return 0;
449 if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY)
450 return IXGBE_ERR_FW_RESP_INVALID;
451
452 return IXGBE_ERR_TOKEN_RETRY;
453}
454
455/**
456 * ixgbe_put_phy_token - Put the token for shared PHY access
457 * @hw: Pointer to hardware structure
458 */
459static s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
460{
461 struct ixgbe_hic_phy_token_req token_cmd;
462 s32 status;
463
464 token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
465 token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
466 token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
467 token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
468 token_cmd.port_number = hw->bus.lan_id;
469 token_cmd.command_type = FW_PHY_TOKEN_REL;
470 token_cmd.pad = 0;
471 status = ixgbe_host_interface_command(hw, &token_cmd, sizeof(token_cmd),
472 IXGBE_HI_COMMAND_TIMEOUT,
473 true);
474 if (status)
475 return status;
476 if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
477 return 0;
478 return IXGBE_ERR_FW_RESP_INVALID;
479}
480
481/**
482 * ixgbe_write_iosf_sb_reg_x550a - Write to IOSF PHY register
483 * @hw: pointer to hardware structure
484 * @reg_addr: 32 bit PHY register to write
485 * @device_type: 3 bit device type
486 * @data: Data to write to the register
487 **/
488static s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
489 __always_unused u32 device_type,
490 u32 data)
491{
492 struct ixgbe_hic_internal_phy_req write_cmd;
493
494 memset(&write_cmd, 0, sizeof(write_cmd));
495 write_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
496 write_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
497 write_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
498 write_cmd.port_number = hw->bus.lan_id;
499 write_cmd.command_type = FW_INT_PHY_REQ_WRITE;
500 write_cmd.address = cpu_to_be16(reg_addr);
501 write_cmd.write_data = cpu_to_be32(data);
502
503 return ixgbe_host_interface_command(hw, &write_cmd, sizeof(write_cmd),
504 IXGBE_HI_COMMAND_TIMEOUT, false);
505}
506
507/**
508 * ixgbe_read_iosf_sb_reg_x550a - Read from IOSF PHY register
509 * @hw: pointer to hardware structure
 510 * @reg_addr: 32 bit PHY register to read
 511 * @device_type: 3 bit device type
 512 * @data: Pointer to the location where the read data is returned
513 **/
514static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
515 __always_unused u32 device_type,
516 u32 *data)
517{
518 union {
519 struct ixgbe_hic_internal_phy_req cmd;
520 struct ixgbe_hic_internal_phy_resp rsp;
521 } hic;
522 s32 status;
523
524 memset(&hic, 0, sizeof(hic));
525 hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
526 hic.cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
527 hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
528 hic.cmd.port_number = hw->bus.lan_id;
529 hic.cmd.command_type = FW_INT_PHY_REQ_READ;
530 hic.cmd.address = cpu_to_be16(reg_addr);
531
532 status = ixgbe_host_interface_command(hw, &hic.cmd, sizeof(hic.cmd),
533 IXGBE_HI_COMMAND_TIMEOUT, true);
534
535 /* Extract the register value from the response. */
536 *data = be32_to_cpu(hic.rsp.read_data);
537
538 return status;
539}
540
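The two token helpers above send the same host-interface request and differ only in command_type (request vs. release) and in how the firmware's ret_status is folded into a driver status: OK maps to success, RETRY maps to a retryable error, and anything else is an invalid response. The sketch below models only that mapping; the FW_TOKEN_* values and map_token_response() are illustrative stand-ins, not the real ixgbe constants.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the firmware return codes. */
    enum fw_token_status { FW_TOKEN_OK = 1, FW_TOKEN_RETRY = 0x80, FW_TOKEN_BAD = 0xff };

    /* Driver-side status values, loosely modelled on the ixgbe error codes. */
    #define STATUS_OK           0
    #define ERR_FW_RESP_INVALID (-1)
    #define ERR_TOKEN_RETRY     (-2)

    /* Map a firmware token response to a driver status the same way
     * ixgbe_get_phy_token() does: OK -> 0, RETRY -> retry later,
     * anything else -> invalid response. */
    static int map_token_response(uint8_t ret_status)
    {
        if (ret_status == FW_TOKEN_OK)
            return STATUS_OK;
        if (ret_status != FW_TOKEN_RETRY)
            return ERR_FW_RESP_INVALID;
        return ERR_TOKEN_RETRY;
    }

    int main(void)
    {
        uint8_t responses[] = { FW_TOKEN_OK, FW_TOKEN_RETRY, FW_TOKEN_BAD };

        for (unsigned int i = 0; i < sizeof(responses); i++)
            printf("fw=0x%02x -> driver status %d\n",
                   (unsigned int)responses[i], map_token_response(responses[i]));
        return 0;
    }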
415/** ixgbe_read_ee_hostif_data_X550 - Read EEPROM word using a host interface 541/** ixgbe_read_ee_hostif_data_X550 - Read EEPROM word using a host interface
416 * command assuming that the semaphore is already obtained. 542 * command assuming that the semaphore is already obtained.
417 * @hw: pointer to hardware structure 543 * @hw: pointer to hardware structure
@@ -436,8 +562,7 @@ static s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
436 /* one word */ 562 /* one word */
437 buffer.length = cpu_to_be16(sizeof(u16)); 563 buffer.length = cpu_to_be16(sizeof(u16));
438 564
439 status = ixgbe_host_interface_command(hw, (u32 *)&buffer, 565 status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer),
440 sizeof(buffer),
441 IXGBE_HI_COMMAND_TIMEOUT, false); 566 IXGBE_HI_COMMAND_TIMEOUT, false);
442 if (status) 567 if (status)
443 return status; 568 return status;
@@ -487,7 +612,7 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
487 buffer.address = cpu_to_be32((offset + current_word) * 2); 612 buffer.address = cpu_to_be32((offset + current_word) * 2);
488 buffer.length = cpu_to_be16(words_to_read * 2); 613 buffer.length = cpu_to_be16(words_to_read * 2);
489 614
490 status = ixgbe_host_interface_command(hw, (u32 *)&buffer, 615 status = ixgbe_host_interface_command(hw, &buffer,
491 sizeof(buffer), 616 sizeof(buffer),
492 IXGBE_HI_COMMAND_TIMEOUT, 617 IXGBE_HI_COMMAND_TIMEOUT,
493 false); 618 false);
@@ -770,8 +895,7 @@ static s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
770 buffer.data = data; 895 buffer.data = data;
771 buffer.address = cpu_to_be32(offset * 2); 896 buffer.address = cpu_to_be32(offset * 2);
772 897
773 status = ixgbe_host_interface_command(hw, (u32 *)&buffer, 898 status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer),
774 sizeof(buffer),
775 IXGBE_HI_COMMAND_TIMEOUT, false); 899 IXGBE_HI_COMMAND_TIMEOUT, false);
776 return status; 900 return status;
777} 901}
@@ -813,8 +937,7 @@ static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
813 buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN; 937 buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN;
814 buffer.req.checksum = FW_DEFAULT_CHECKSUM; 938 buffer.req.checksum = FW_DEFAULT_CHECKSUM;
815 939
816 status = ixgbe_host_interface_command(hw, (u32 *)&buffer, 940 status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer),
817 sizeof(buffer),
818 IXGBE_HI_COMMAND_TIMEOUT, false); 941 IXGBE_HI_COMMAND_TIMEOUT, false);
819 return status; 942 return status;
820} 943}
@@ -861,9 +984,9 @@ static void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
861 fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD; 984 fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD;
862 fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN; 985 fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN;
863 fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; 986 fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
864 fw_cmd.port_number = (u8)hw->bus.lan_id; 987 fw_cmd.port_number = hw->bus.lan_id;
865 988
866 status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, 989 status = ixgbe_host_interface_command(hw, &fw_cmd,
867 sizeof(struct ixgbe_hic_disable_rxen), 990 sizeof(struct ixgbe_hic_disable_rxen),
868 IXGBE_HI_COMMAND_TIMEOUT, true); 991 IXGBE_HI_COMMAND_TIMEOUT, true);
869 992
@@ -1248,6 +1371,117 @@ i2c_err:
1248} 1371}
1249 1372
1250/** 1373/**
1374 * ixgbe_setup_mac_link_sfp_n - Setup internal PHY for native SFP
1375 * @hw: pointer to hardware structure
1376 *
 1377 * Configure the integrated PHY for native SFP support.
1378 */
1379static s32
1380ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
1381 __always_unused bool autoneg_wait_to_complete)
1382{
1383 bool setup_linear = false;
1384 u32 reg_phy_int;
1385 s32 rc;
1386
1387 /* Check if SFP module is supported and linear */
1388 rc = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
1389
 1390	/* If no SFP module is present, return success: an SFP-not-present
 1391	 * error is not treated as fatal in the setup MAC link flow.
 1392	 */
1393 if (rc == IXGBE_ERR_SFP_NOT_PRESENT)
1394 return 0;
1395
 1396	if (rc)
 1397		return rc;
1398
1399 /* Configure internal PHY for native SFI */
1400 rc = hw->mac.ops.read_iosf_sb_reg(hw,
1401 IXGBE_KRM_AN_CNTL_8(hw->bus.lan_id),
1402 IXGBE_SB_IOSF_TARGET_KR_PHY,
1403 &reg_phy_int);
1404 if (rc)
1405 return rc;
1406
1407 if (setup_linear) {
1408 reg_phy_int &= ~IXGBE_KRM_AN_CNTL_8_LIMITING;
1409 reg_phy_int |= IXGBE_KRM_AN_CNTL_8_LINEAR;
1410 } else {
1411 reg_phy_int |= IXGBE_KRM_AN_CNTL_8_LIMITING;
1412 reg_phy_int &= ~IXGBE_KRM_AN_CNTL_8_LINEAR;
1413 }
1414
1415 rc = hw->mac.ops.write_iosf_sb_reg(hw,
1416 IXGBE_KRM_AN_CNTL_8(hw->bus.lan_id),
1417 IXGBE_SB_IOSF_TARGET_KR_PHY,
1418 reg_phy_int);
1419 if (rc)
1420 return rc;
1421
1422 /* Setup XFI/SFI internal link */
1423 return ixgbe_setup_ixfi_x550em(hw, &speed);
1424}
1425
1426/**
1427 * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP
1428 * @hw: pointer to hardware structure
1429 *
 1430 * Configure the integrated PHY for SFP support.
1431 */
1432static s32
1433ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
1434 __always_unused bool autoneg_wait_to_complete)
1435{
1436 u32 reg_slice, slice_offset;
1437 bool setup_linear = false;
1438 u16 reg_phy_ext;
1439 s32 rc;
1440
1441 /* Check if SFP module is supported and linear */
1442 rc = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
1443
 1444	/* If no SFP module is present, return success: an SFP-not-present
 1445	 * error is not treated as fatal in the setup MAC link flow.
 1446	 */
1447 if (rc == IXGBE_ERR_SFP_NOT_PRESENT)
1448 return 0;
1449
 1450	if (rc)
 1451		return rc;
1452
1453 /* Configure internal PHY for KR/KX. */
1454 ixgbe_setup_kr_speed_x550em(hw, speed);
1455
1456 if (!hw->phy.mdio.prtad || hw->phy.mdio.prtad == 0xFFFF)
1457 return IXGBE_ERR_PHY_ADDR_INVALID;
1458
1459 /* Get external PHY device id */
1460 rc = hw->phy.ops.read_reg(hw, IXGBE_CS4227_GLOBAL_ID_MSB,
1461 IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
1462 if (rc)
1463 return rc;
1464
1465 /* When configuring quad port CS4223, the MAC instance is part
1466 * of the slice offset.
1467 */
1468 if (reg_phy_ext == IXGBE_CS4223_PHY_ID)
1469 slice_offset = (hw->bus.lan_id +
1470 (hw->bus.instance_id << 1)) << 12;
1471 else
1472 slice_offset = hw->bus.lan_id << 12;
1473
1474 /* Configure CS4227/CS4223 LINE side to proper mode. */
1475 reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset;
1476 if (setup_linear)
1477 reg_phy_ext = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1;
1478 else
1479 reg_phy_ext = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1;
1480 return hw->phy.ops.write_reg(hw, reg_slice, IXGBE_MDIO_ZERO_DEV_TYPE,
1481 reg_phy_ext);
1482}
1483
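For the quad-port CS4223, the register slice above is selected from both the LAN id and the bus instance id (instance_id shifted into the next bit before the whole value moves into the slice field at bit 12), while the CS4227 case uses lan_id alone. A small worked example of that arithmetic, with made-up port values:

    #include <stdio.h>

    /* Same arithmetic as ixgbe_setup_mac_link_sfp_x550a() uses to pick the
     * CS4223/CS4227 register slice. */
    static unsigned int slice_offset(unsigned int lan_id, unsigned int instance_id,
                                     int quad_port)
    {
        if (quad_port)  /* CS4223: MAC instance is part of the slice */
            return (lan_id + (instance_id << 1)) << 12;
        return lan_id << 12;    /* CS4227 */
    }

    int main(void)
    {
        for (unsigned int inst = 0; inst < 2; inst++)
            for (unsigned int lan = 0; lan < 2; lan++)
                printf("instance %u lan %u -> offset 0x%05x\n",
                       inst, lan, slice_offset(lan, inst, 1));
        return 0;
    }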
1484/**
1251 * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed 1485 * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed
1252 * @hw: pointer to hardware structure 1486 * @hw: pointer to hardware structure
1253 * @speed: new link speed 1487 * @speed: new link speed
@@ -1326,6 +1560,57 @@ static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw,
1326 return 0; 1560 return 0;
1327} 1561}
1328 1562
1563/**
1564 * ixgbe_setup_sgmii - Set up link for sgmii
1565 * @hw: pointer to hardware structure
1566 */
1567static s32
1568ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed,
1569 __always_unused bool autoneg_wait_to_complete)
1570{
1571 struct ixgbe_mac_info *mac = &hw->mac;
1572 u32 lval, sval;
1573 s32 rc;
1574
1575 rc = mac->ops.read_iosf_sb_reg(hw,
1576 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1577 IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
1578 if (rc)
1579 return rc;
1580
1581 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
1582 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
1583 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
1584 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
1585 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
1586 rc = mac->ops.write_iosf_sb_reg(hw,
1587 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1588 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1589 if (rc)
1590 return rc;
1591
1592 rc = mac->ops.read_iosf_sb_reg(hw,
1593 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1594 IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
1595 if (rc)
1596 return rc;
1597
1598 sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
1599 sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
1600 rc = mac->ops.write_iosf_sb_reg(hw,
1601 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1602 IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
1603 if (rc)
1604 return rc;
1605
1606 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
1607 rc = mac->ops.write_iosf_sb_reg(hw,
1608 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1609 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1610
1611 return rc;
1612}
1613
1329/** ixgbe_init_mac_link_ops_X550em - init mac link function pointers 1614/** ixgbe_init_mac_link_ops_X550em - init mac link function pointers
1330 * @hw: pointer to hardware structure 1615 * @hw: pointer to hardware structure
1331 **/ 1616 **/
@@ -1342,15 +1627,35 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
1342 mac->ops.enable_tx_laser = NULL; 1627 mac->ops.enable_tx_laser = NULL;
1343 mac->ops.flap_tx_laser = NULL; 1628 mac->ops.flap_tx_laser = NULL;
1344 mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber; 1629 mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
1345 mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_x550em; 1630 mac->ops.setup_fc = ixgbe_setup_fc_x550em;
1631 switch (hw->device_id) {
1632 case IXGBE_DEV_ID_X550EM_A_SFP_N:
1633 mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_n;
1634 break;
1635 case IXGBE_DEV_ID_X550EM_A_SFP:
1636 mac->ops.setup_mac_link =
1637 ixgbe_setup_mac_link_sfp_x550a;
1638 break;
1639 default:
1640 mac->ops.setup_mac_link =
1641 ixgbe_setup_mac_link_sfp_x550em;
1642 break;
1643 }
1346 mac->ops.set_rate_select_speed = 1644 mac->ops.set_rate_select_speed =
1347 ixgbe_set_soft_rate_select_speed; 1645 ixgbe_set_soft_rate_select_speed;
1348 break; 1646 break;
1349 case ixgbe_media_type_copper: 1647 case ixgbe_media_type_copper:
1350 mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; 1648 mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
1649 mac->ops.setup_fc = ixgbe_setup_fc_generic;
1351 mac->ops.check_link = ixgbe_check_link_t_X550em; 1650 mac->ops.check_link = ixgbe_check_link_t_X550em;
1651 return;
1652 case ixgbe_media_type_backplane:
1653 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
1654 hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L)
1655 mac->ops.setup_link = ixgbe_setup_sgmii;
1352 break; 1656 break;
1353 default: 1657 default:
1658 mac->ops.setup_fc = ixgbe_setup_fc_x550em;
1354 break; 1659 break;
1355 } 1660 }
1356} 1661}
@@ -1614,7 +1919,7 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
1614 s32 status; 1919 s32 status;
1615 u32 reg_val; 1920 u32 reg_val;
1616 1921
1617 status = ixgbe_read_iosf_sb_reg_x550(hw, 1922 status = hw->mac.ops.read_iosf_sb_reg(hw,
1618 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), 1923 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1619 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); 1924 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
1620 if (status) 1925 if (status)
@@ -1636,7 +1941,7 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
1636 1941
1637 /* Restart auto-negotiation. */ 1942 /* Restart auto-negotiation. */
1638 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; 1943 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
1639 status = ixgbe_write_iosf_sb_reg_x550(hw, 1944 status = hw->mac.ops.write_iosf_sb_reg(hw,
1640 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), 1945 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1641 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); 1946 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
1642 1947
@@ -1653,9 +1958,9 @@ static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
1653 s32 status; 1958 s32 status;
1654 u32 reg_val; 1959 u32 reg_val;
1655 1960
1656 status = ixgbe_read_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1, 1961 status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KX4_LINK_CNTL_1,
1657 IXGBE_SB_IOSF_TARGET_KX4_PCS0 + 1962 IXGBE_SB_IOSF_TARGET_KX4_PCS0 +
1658 hw->bus.lan_id, &reg_val); 1963 hw->bus.lan_id, &reg_val);
1659 if (status) 1964 if (status)
1660 return status; 1965 return status;
1661 1966
@@ -1674,20 +1979,24 @@ static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
1674 1979
1675 /* Restart auto-negotiation. */ 1980 /* Restart auto-negotiation. */
1676 reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART; 1981 reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART;
1677 status = ixgbe_write_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1, 1982 status = hw->mac.ops.write_iosf_sb_reg(hw, IXGBE_KX4_LINK_CNTL_1,
1678 IXGBE_SB_IOSF_TARGET_KX4_PCS0 + 1983 IXGBE_SB_IOSF_TARGET_KX4_PCS0 +
1679 hw->bus.lan_id, reg_val); 1984 hw->bus.lan_id, reg_val);
1680 1985
1681 return status; 1986 return status;
1682} 1987}
1683 1988
1684/** ixgbe_setup_kr_x550em - Configure the KR PHY. 1989/**
1685 * @hw: pointer to hardware structure 1990 * ixgbe_setup_kr_x550em - Configure the KR PHY
1991 * @hw: pointer to hardware structure
1686 * 1992 *
1687 * Configures the integrated KR PHY. 1993 * Configures the integrated KR PHY for X550EM_x.
1688 **/ 1994 **/
1689static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw) 1995static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
1690{ 1996{
1997 if (hw->mac.type != ixgbe_mac_X550EM_x)
1998 return 0;
1999
1691 return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised); 2000 return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
1692} 2001}
1693 2002
@@ -1842,6 +2151,86 @@ static s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw,
1842 return status; 2151 return status;
1843} 2152}
1844 2153
2154/**
2155 * ixgbe_setup_fc_x550em - Set up flow control
2156 * @hw: pointer to hardware structure
2157 */
2158static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw)
2159{
2160 bool pause, asm_dir;
2161 u32 reg_val;
2162 s32 rc;
2163
2164 /* Validate the requested mode */
2165 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
2166 hw_err(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
2167 return IXGBE_ERR_INVALID_LINK_SETTINGS;
2168 }
2169
2170 /* 10gig parts do not have a word in the EEPROM to determine the
2171 * default flow control setting, so we explicitly set it to full.
2172 */
2173 if (hw->fc.requested_mode == ixgbe_fc_default)
2174 hw->fc.requested_mode = ixgbe_fc_full;
2175
2176 /* Determine PAUSE and ASM_DIR bits. */
2177 switch (hw->fc.requested_mode) {
2178 case ixgbe_fc_none:
2179 pause = false;
2180 asm_dir = false;
2181 break;
2182 case ixgbe_fc_tx_pause:
2183 pause = false;
2184 asm_dir = true;
2185 break;
2186 case ixgbe_fc_rx_pause:
2187 /* Rx Flow control is enabled and Tx Flow control is
2188 * disabled by software override. Since there really
2189 * isn't a way to advertise that we are capable of RX
2190 * Pause ONLY, we will advertise that we support both
 2191	 * symmetric and asymmetric Rx PAUSE, so we fall
 2192	 * through to the fc_full case. Later, we will
2193 * disable the adapter's ability to send PAUSE frames.
2194 */
2195 /* Fallthrough */
2196 case ixgbe_fc_full:
2197 pause = true;
2198 asm_dir = true;
2199 break;
2200 default:
2201 hw_err(hw, "Flow control param set incorrectly\n");
2202 return IXGBE_ERR_CONFIG;
2203 }
2204
2205 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_KR &&
2206 hw->device_id != IXGBE_DEV_ID_X550EM_A_KR &&
2207 hw->device_id != IXGBE_DEV_ID_X550EM_A_KR_L)
2208 return 0;
2209
2210 rc = hw->mac.ops.read_iosf_sb_reg(hw,
2211 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
2212 IXGBE_SB_IOSF_TARGET_KR_PHY,
2213 &reg_val);
2214 if (rc)
2215 return rc;
2216
2217 reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
2218 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
2219 if (pause)
2220 reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
2221 if (asm_dir)
2222 reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
2223 rc = hw->mac.ops.write_iosf_sb_reg(hw,
2224 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
2225 IXGBE_SB_IOSF_TARGET_KR_PHY,
2226 reg_val);
2227
2228 /* This device does not fully support AN. */
2229 hw->fc.disable_fc_autoneg = true;
2230
2231 return rc;
2232}
2233
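The switch above collapses the four flow-control modes into the two Clause 37 advertisement bits, PAUSE (symmetric) and ASM_DIR (asymmetric); rx_pause deliberately advertises the same bits as full because there is no rx-only advertisement bit, and fc_enable later suppresses transmitted PAUSE frames. A compact, self-contained sketch of that truth table (the enum and function names are illustrative, not the ixgbe ones):

    #include <stdbool.h>
    #include <stdio.h>

    enum fc_mode { FC_NONE, FC_TX_PAUSE, FC_RX_PAUSE, FC_FULL };

    /* Mirror of the switch in ixgbe_setup_fc_x550em(): rx_pause is advertised
     * as if it were full, since there is no "rx only" advertisement bit. */
    static int fc_to_an_bits(enum fc_mode mode, bool *pause, bool *asm_dir)
    {
        switch (mode) {
        case FC_NONE:
            *pause = false; *asm_dir = false; break;
        case FC_TX_PAUSE:
            *pause = false; *asm_dir = true;  break;
        case FC_RX_PAUSE:   /* falls through: advertised like FC_FULL */
        case FC_FULL:
            *pause = true;  *asm_dir = true;  break;
        default:
            return -1;  /* "Flow control param set incorrectly" */
        }
        return 0;
    }

    int main(void)
    {
        static const char *names[] = { "none", "tx_pause", "rx_pause", "full" };

        for (enum fc_mode m = FC_NONE; m <= FC_FULL; m++) {
            bool pause, asm_dir;

            fc_to_an_bits(m, &pause, &asm_dir);
            printf("%-8s -> PAUSE=%d ASM_DIR=%d\n", names[m], pause, asm_dir);
        }
        return 0;
    }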
1845/** ixgbe_enter_lplu_x550em - Transition to low power states 2234/** ixgbe_enter_lplu_x550em - Transition to low power states
1846 * @hw: pointer to hardware structure 2235 * @hw: pointer to hardware structure
1847 * 2236 *
@@ -1939,6 +2328,36 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
1939 return status; 2328 return status;
1940} 2329}
1941 2330
2331/**
2332 * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register
2333 * @hw: pointer to hardware structure
2334 *
2335 * Read NW_MNG_IF_SEL register and save field values.
2336 */
2337static void ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
2338{
2339 /* Save NW management interface connected on board. This is used
2340 * to determine internal PHY mode.
2341 */
2342 hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
2343
 2344	/* If X552 (X550EM_a) and MDIO is connected to an external PHY, then
 2345	 * set the PHY address. This register field has only been used for X552.
 2346	 */
2347 if (!hw->phy.nw_mng_if_sel) {
2348 if (hw->mac.type == ixgbe_mac_x550em_a) {
2349 struct ixgbe_adapter *adapter = hw->back;
2350
2351 e_warn(drv, "nw_mng_if_sel not set\n");
2352 }
2353 return;
2354 }
2355
2356 hw->phy.mdio.prtad = (hw->phy.nw_mng_if_sel &
2357 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
2358 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
2359}
2360
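ixgbe_read_mng_if_sel_x550em() above is a cached mask-and-shift field extraction: read the register once, bail out (with a warning on x550em_a) if it is zero, otherwise pull the MDIO PHY address out of its field. The sketch below shows the same extraction pattern with a hypothetical mask and shift; the real IXGBE_NW_MNG_IF_SEL_* values are register-specific and are not reproduced here.

    #include <stdio.h>

    /* Hypothetical field layout: MDIO PHY address in bits 3..7 of the register. */
    #define MDIO_PHY_ADD_MASK  0x000000F8u
    #define MDIO_PHY_ADD_SHIFT 3

    static unsigned int field_get(unsigned int reg, unsigned int mask,
                                  unsigned int shift)
    {
        return (reg & mask) >> shift;
    }

    int main(void)
    {
        unsigned int nw_mng_if_sel = 0x000000A8;    /* pretend register read */

        if (!nw_mng_if_sel) {
            printf("nw_mng_if_sel not set\n");
            return 0;
        }
        printf("MDIO PHY address = %u\n",
               field_get(nw_mng_if_sel, MDIO_PHY_ADD_MASK, MDIO_PHY_ADD_SHIFT));
        return 0;
    }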
1942/** ixgbe_init_phy_ops_X550em - PHY/SFP specific init 2361/** ixgbe_init_phy_ops_X550em - PHY/SFP specific init
1943 * @hw: pointer to hardware structure 2362 * @hw: pointer to hardware structure
1944 * 2363 *
@@ -1953,14 +2372,11 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
1953 2372
1954 hw->mac.ops.set_lan_id(hw); 2373 hw->mac.ops.set_lan_id(hw);
1955 2374
2375 ixgbe_read_mng_if_sel_x550em(hw);
2376
1956 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) { 2377 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) {
1957 phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; 2378 phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
1958 ixgbe_setup_mux_ctl(hw); 2379 ixgbe_setup_mux_ctl(hw);
1959
1960 /* Save NW management interface connected on board. This is used
1961 * to determine internal PHY mode.
1962 */
1963 phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
1964 } 2380 }
1965 2381
1966 /* Identify the PHY or SFP module */ 2382 /* Identify the PHY or SFP module */
@@ -2023,16 +2439,24 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
2023 2439
2024 /* Detect if there is a copper PHY attached. */ 2440 /* Detect if there is a copper PHY attached. */
2025 switch (hw->device_id) { 2441 switch (hw->device_id) {
2442 case IXGBE_DEV_ID_X550EM_A_SGMII:
2443 case IXGBE_DEV_ID_X550EM_A_SGMII_L:
2444 hw->phy.type = ixgbe_phy_sgmii;
2445 /* Fallthrough */
2026 case IXGBE_DEV_ID_X550EM_X_KR: 2446 case IXGBE_DEV_ID_X550EM_X_KR:
2027 case IXGBE_DEV_ID_X550EM_X_KX4: 2447 case IXGBE_DEV_ID_X550EM_X_KX4:
2448 case IXGBE_DEV_ID_X550EM_A_KR:
2449 case IXGBE_DEV_ID_X550EM_A_KR_L:
2028 media_type = ixgbe_media_type_backplane; 2450 media_type = ixgbe_media_type_backplane;
2029 break; 2451 break;
2030 case IXGBE_DEV_ID_X550EM_X_SFP: 2452 case IXGBE_DEV_ID_X550EM_X_SFP:
2453 case IXGBE_DEV_ID_X550EM_A_SFP:
2454 case IXGBE_DEV_ID_X550EM_A_SFP_N:
2031 media_type = ixgbe_media_type_fiber; 2455 media_type = ixgbe_media_type_fiber;
2032 break; 2456 break;
2033 case IXGBE_DEV_ID_X550EM_X_1G_T: 2457 case IXGBE_DEV_ID_X550EM_X_1G_T:
2034 case IXGBE_DEV_ID_X550EM_X_10G_T: 2458 case IXGBE_DEV_ID_X550EM_X_10G_T:
2035 media_type = ixgbe_media_type_copper; 2459 media_type = ixgbe_media_type_copper;
2036 break; 2460 break;
2037 default: 2461 default:
2038 media_type = ixgbe_media_type_unknown; 2462 media_type = ixgbe_media_type_unknown;
@@ -2080,6 +2504,27 @@ static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
2080 return status; 2504 return status;
2081} 2505}
2082 2506
2507/**
2508 * ixgbe_set_mdio_speed - Set MDIO clock speed
2509 * @hw: pointer to hardware structure
2510 */
2511static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
2512{
2513 u32 hlreg0;
2514
2515 switch (hw->device_id) {
2516 case IXGBE_DEV_ID_X550EM_X_10G_T:
2517 case IXGBE_DEV_ID_X550EM_A_SFP:
2518 /* Config MDIO clock speed before the first MDIO PHY access */
2519 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2520 hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
2521 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2522 break;
2523 default:
2524 break;
2525 }
2526}
2527
2083/** ixgbe_reset_hw_X550em - Perform hardware reset 2528/** ixgbe_reset_hw_X550em - Perform hardware reset
2084 ** @hw: pointer to hardware structure 2529 ** @hw: pointer to hardware structure
2085 ** 2530 **
@@ -2093,7 +2538,6 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
2093 s32 status; 2538 s32 status;
2094 u32 ctrl = 0; 2539 u32 ctrl = 0;
2095 u32 i; 2540 u32 i;
2096 u32 hlreg0;
2097 bool link_up = false; 2541 bool link_up = false;
2098 2542
2099 /* Call adapter stop to disable Tx/Rx and clear interrupts */ 2543 /* Call adapter stop to disable Tx/Rx and clear interrupts */
@@ -2179,11 +2623,7 @@ mac_reset_top:
2179 hw->mac.num_rar_entries = 128; 2623 hw->mac.num_rar_entries = 128;
2180 hw->mac.ops.init_rx_addrs(hw); 2624 hw->mac.ops.init_rx_addrs(hw);
2181 2625
2182 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { 2626 ixgbe_set_mdio_speed(hw);
2183 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2184 hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
2185 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2186 }
2187 2627
2188 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) 2628 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
2189 ixgbe_setup_mux_ctl(hw); 2629 ixgbe_setup_mux_ctl(hw);
@@ -2296,6 +2736,110 @@ static void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
2296 ixgbe_release_swfw_sync_X540(hw, mask); 2736 ixgbe_release_swfw_sync_X540(hw, mask);
2297} 2737}
2298 2738
2739/**
2740 * ixgbe_acquire_swfw_sync_x550em_a - Acquire SWFW semaphore
2741 * @hw: pointer to hardware structure
2742 * @mask: Mask to specify which semaphore to acquire
2743 *
 2744 * Acquires the SWFW semaphore and gets the shared PHY token as needed
2745 */
2746static s32 ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
2747{
2748 u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
2749 int retries = FW_PHY_TOKEN_RETRIES;
2750 s32 status;
2751
2752 while (--retries) {
2753 status = 0;
2754 if (hmask)
2755 status = ixgbe_acquire_swfw_sync_X540(hw, hmask);
2756 if (status)
2757 return status;
2758 if (!(mask & IXGBE_GSSR_TOKEN_SM))
2759 return 0;
2760
2761 status = ixgbe_get_phy_token(hw);
2762 if (!status)
2763 return 0;
2764 if (hmask)
2765 ixgbe_release_swfw_sync_X540(hw, hmask);
2766 if (status != IXGBE_ERR_TOKEN_RETRY)
2767 return status;
2768 udelay(FW_PHY_TOKEN_DELAY * 1000);
2769 }
2770
2771 return status;
2772}
2773
2774/**
2775 * ixgbe_release_swfw_sync_x550em_a - Release SWFW semaphore
2776 * @hw: pointer to hardware structure
2777 * @mask: Mask to specify which semaphore to release
2778 *
 2779 * Releases the SWFW semaphore and puts the shared PHY token as needed
2780 */
2781static void ixgbe_release_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
2782{
2783 u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
2784
2785 if (mask & IXGBE_GSSR_TOKEN_SM)
2786 ixgbe_put_phy_token(hw);
2787
2788 if (hmask)
2789 ixgbe_release_swfw_sync_X540(hw, hmask);
2790}
2791
2792/**
2793 * ixgbe_read_phy_reg_x550a - Reads specified PHY register
2794 * @hw: pointer to hardware structure
2795 * @reg_addr: 32 bit address of PHY register to read
2796 * @phy_data: Pointer to read data from PHY register
2797 *
2798 * Reads a value from a specified PHY register using the SWFW lock and PHY
 2799 * Token. The PHY Token is needed since the MDIO is shared between two MAC
2800 * instances.
2801 */
2802static s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
2803 u32 device_type, u16 *phy_data)
2804{
2805 u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
2806 s32 status;
2807
2808 if (hw->mac.ops.acquire_swfw_sync(hw, mask))
2809 return IXGBE_ERR_SWFW_SYNC;
2810
2811 status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data);
2812
2813 hw->mac.ops.release_swfw_sync(hw, mask);
2814
2815 return status;
2816}
2817
2818/**
2819 * ixgbe_write_phy_reg_x550a - Writes specified PHY register
2820 * @hw: pointer to hardware structure
2821 * @reg_addr: 32 bit PHY register to write
2822 * @device_type: 5 bit device type
2823 * @phy_data: Data to write to the PHY register
2824 *
 2825 * Writes a value to a specified PHY register using the SWFW lock and PHY Token.
 2826 * The PHY Token is needed since the MDIO is shared between two MAC instances.
2827 */
2828static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
2829 u32 device_type, u16 phy_data)
2830{
2831 u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
2832 s32 status;
2833
2834 if (hw->mac.ops.acquire_swfw_sync(hw, mask))
2835 return IXGBE_ERR_SWFW_SYNC;
2836
2837 status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type, phy_data);
2838 hw->mac.ops.release_swfw_sync(hw, mask);
2839
2840 return status;
2841}
2842
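The x550em_a acquire path above layers the firmware PHY token on top of the existing X540 SWFW semaphore: it holds the host semaphore only while asking for the token, drops it on a token-retry response so the other MAC or the firmware can make progress, and gives up after a bounded number of attempts. The following is a self-contained model of that retry structure only; host_sem_acquire(), fw_token_get() and the constants are stubs, not the real ixgbe calls.

    #include <stdio.h>

    #define ERR_TOKEN_RETRY (-2)
    #define RETRIES 4

    /* Stubbed host semaphore and firmware token, for illustration only. */
    static int host_sem_acquire(void) { return 0; }
    static void host_sem_release(void) { }

    static int token_attempts;
    static int fw_token_get(void)
    {
        /* Pretend the firmware asks us to retry twice before granting it. */
        return ++token_attempts < 3 ? ERR_TOKEN_RETRY : 0;
    }

    /* Same shape as ixgbe_acquire_swfw_sync_x550em_a(): hold the host
     * semaphore only while asking for the token, back off on RETRY. */
    static int acquire_sync_with_token(void)
    {
        int status = ERR_TOKEN_RETRY;

        for (int retries = RETRIES; --retries; ) {
            status = host_sem_acquire();
            if (status)
                return status;

            status = fw_token_get();
            if (!status)
                return 0;           /* got both: done */

            host_sem_release();     /* let the other owner run */
            if (status != ERR_TOKEN_RETRY)
                return status;
            /* the real driver delays here before retrying */
        }
        return status;
    }

    int main(void)
    {
        printf("acquire status: %d (after %d token attempts)\n",
               acquire_sync_with_token(), token_attempts);
        return 0;
    }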
2299#define X550_COMMON_MAC \ 2843#define X550_COMMON_MAC \
2300 .init_hw = &ixgbe_init_hw_generic, \ 2844 .init_hw = &ixgbe_init_hw_generic, \
2301 .start_hw = &ixgbe_start_hw_X540, \ 2845 .start_hw = &ixgbe_start_hw_X540, \
@@ -2337,12 +2881,10 @@ static void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
2337 .enable_rx_buff = &ixgbe_enable_rx_buff_generic, \ 2881 .enable_rx_buff = &ixgbe_enable_rx_buff_generic, \
2338 .get_thermal_sensor_data = NULL, \ 2882 .get_thermal_sensor_data = NULL, \
2339 .init_thermal_sensor_thresh = NULL, \ 2883 .init_thermal_sensor_thresh = NULL, \
2340 .prot_autoc_read = &prot_autoc_read_generic, \
2341 .prot_autoc_write = &prot_autoc_write_generic, \
2342 .enable_rx = &ixgbe_enable_rx_generic, \ 2884 .enable_rx = &ixgbe_enable_rx_generic, \
2343 .disable_rx = &ixgbe_disable_rx_x550, \ 2885 .disable_rx = &ixgbe_disable_rx_x550, \
2344 2886
2345static struct ixgbe_mac_operations mac_ops_X550 = { 2887static const struct ixgbe_mac_operations mac_ops_X550 = {
2346 X550_COMMON_MAC 2888 X550_COMMON_MAC
2347 .reset_hw = &ixgbe_reset_hw_X540, 2889 .reset_hw = &ixgbe_reset_hw_X540,
2348 .get_media_type = &ixgbe_get_media_type_X540, 2890 .get_media_type = &ixgbe_get_media_type_X540,
@@ -2354,9 +2896,13 @@ static struct ixgbe_mac_operations mac_ops_X550 = {
2354 .setup_sfp = NULL, 2896 .setup_sfp = NULL,
2355 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, 2897 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540,
2356 .release_swfw_sync = &ixgbe_release_swfw_sync_X540, 2898 .release_swfw_sync = &ixgbe_release_swfw_sync_X540,
2899 .init_swfw_sync = &ixgbe_init_swfw_sync_X540,
2900 .prot_autoc_read = prot_autoc_read_generic,
2901 .prot_autoc_write = prot_autoc_write_generic,
2902 .setup_fc = ixgbe_setup_fc_generic,
2357}; 2903};
2358 2904
2359static struct ixgbe_mac_operations mac_ops_X550EM_x = { 2905static const struct ixgbe_mac_operations mac_ops_X550EM_x = {
2360 X550_COMMON_MAC 2906 X550_COMMON_MAC
2361 .reset_hw = &ixgbe_reset_hw_X550em, 2907 .reset_hw = &ixgbe_reset_hw_X550em,
2362 .get_media_type = &ixgbe_get_media_type_X550em, 2908 .get_media_type = &ixgbe_get_media_type_X550em,
@@ -2368,6 +2914,27 @@ static struct ixgbe_mac_operations mac_ops_X550EM_x = {
2368 .setup_sfp = ixgbe_setup_sfp_modules_X550em, 2914 .setup_sfp = ixgbe_setup_sfp_modules_X550em,
2369 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X550em, 2915 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X550em,
2370 .release_swfw_sync = &ixgbe_release_swfw_sync_X550em, 2916 .release_swfw_sync = &ixgbe_release_swfw_sync_X550em,
2917 .init_swfw_sync = &ixgbe_init_swfw_sync_X540,
2918 .setup_fc = NULL, /* defined later */
2919 .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550,
2920 .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550,
2921};
2922
2923static struct ixgbe_mac_operations mac_ops_x550em_a = {
2924 X550_COMMON_MAC
2925 .reset_hw = ixgbe_reset_hw_X550em,
2926 .get_media_type = ixgbe_get_media_type_X550em,
2927 .get_san_mac_addr = NULL,
2928 .get_wwn_prefix = NULL,
2929 .setup_link = NULL, /* defined later */
2930 .get_link_capabilities = ixgbe_get_link_capabilities_X550em,
2931 .get_bus_info = ixgbe_get_bus_info_X550em,
2932 .setup_sfp = ixgbe_setup_sfp_modules_X550em,
2933 .acquire_swfw_sync = ixgbe_acquire_swfw_sync_x550em_a,
2934 .release_swfw_sync = ixgbe_release_swfw_sync_x550em_a,
2935 .setup_fc = ixgbe_setup_fc_generic,
2936 .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a,
2937 .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a,
2371}; 2938};
2372 2939
2373#define X550_COMMON_EEP \ 2940#define X550_COMMON_EEP \
@@ -2379,12 +2946,12 @@ static struct ixgbe_mac_operations mac_ops_X550EM_x = {
2379 .update_checksum = &ixgbe_update_eeprom_checksum_X550, \ 2946 .update_checksum = &ixgbe_update_eeprom_checksum_X550, \
2380 .calc_checksum = &ixgbe_calc_eeprom_checksum_X550, \ 2947 .calc_checksum = &ixgbe_calc_eeprom_checksum_X550, \
2381 2948
2382static struct ixgbe_eeprom_operations eeprom_ops_X550 = { 2949static const struct ixgbe_eeprom_operations eeprom_ops_X550 = {
2383 X550_COMMON_EEP 2950 X550_COMMON_EEP
2384 .init_params = &ixgbe_init_eeprom_params_X550, 2951 .init_params = &ixgbe_init_eeprom_params_X550,
2385}; 2952};
2386 2953
2387static struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = { 2954static const struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = {
2388 X550_COMMON_EEP 2955 X550_COMMON_EEP
2389 .init_params = &ixgbe_init_eeprom_params_X540, 2956 .init_params = &ixgbe_init_eeprom_params_X540,
2390}; 2957};
@@ -2398,23 +2965,25 @@ static struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = {
2398 .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, \ 2965 .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, \
2399 .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, \ 2966 .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, \
2400 .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, \ 2967 .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, \
2401 .read_reg = &ixgbe_read_phy_reg_generic, \
2402 .write_reg = &ixgbe_write_phy_reg_generic, \
2403 .setup_link = &ixgbe_setup_phy_link_generic, \ 2968 .setup_link = &ixgbe_setup_phy_link_generic, \
2404 .set_phy_power = NULL, \ 2969 .set_phy_power = NULL, \
2405 .check_overtemp = &ixgbe_tn_check_overtemp, \ 2970 .check_overtemp = &ixgbe_tn_check_overtemp, \
2406 .get_firmware_version = &ixgbe_get_phy_firmware_version_generic, 2971 .get_firmware_version = &ixgbe_get_phy_firmware_version_generic,
2407 2972
2408static struct ixgbe_phy_operations phy_ops_X550 = { 2973static const struct ixgbe_phy_operations phy_ops_X550 = {
2409 X550_COMMON_PHY 2974 X550_COMMON_PHY
2410 .init = NULL, 2975 .init = NULL,
2411 .identify = &ixgbe_identify_phy_generic, 2976 .identify = &ixgbe_identify_phy_generic,
2977 .read_reg = &ixgbe_read_phy_reg_generic,
2978 .write_reg = &ixgbe_write_phy_reg_generic,
2412}; 2979};
2413 2980
2414static struct ixgbe_phy_operations phy_ops_X550EM_x = { 2981static const struct ixgbe_phy_operations phy_ops_X550EM_x = {
2415 X550_COMMON_PHY 2982 X550_COMMON_PHY
2416 .init = &ixgbe_init_phy_ops_X550em, 2983 .init = &ixgbe_init_phy_ops_X550em,
2417 .identify = &ixgbe_identify_phy_x550em, 2984 .identify = &ixgbe_identify_phy_x550em,
2985 .read_reg = &ixgbe_read_phy_reg_generic,
2986 .write_reg = &ixgbe_write_phy_reg_generic,
2418 .read_i2c_combined = &ixgbe_read_i2c_combined_generic, 2987 .read_i2c_combined = &ixgbe_read_i2c_combined_generic,
2419 .write_i2c_combined = &ixgbe_write_i2c_combined_generic, 2988 .write_i2c_combined = &ixgbe_write_i2c_combined_generic,
2420 .read_i2c_combined_unlocked = &ixgbe_read_i2c_combined_generic_unlocked, 2989 .read_i2c_combined_unlocked = &ixgbe_read_i2c_combined_generic_unlocked,
@@ -2422,6 +2991,14 @@ static struct ixgbe_phy_operations phy_ops_X550EM_x = {
2422 &ixgbe_write_i2c_combined_generic_unlocked, 2991 &ixgbe_write_i2c_combined_generic_unlocked,
2423}; 2992};
2424 2993
2994static const struct ixgbe_phy_operations phy_ops_x550em_a = {
2995 X550_COMMON_PHY
2996 .init = &ixgbe_init_phy_ops_X550em,
2997 .identify = &ixgbe_identify_phy_x550em,
2998 .read_reg = &ixgbe_read_phy_reg_x550a,
2999 .write_reg = &ixgbe_write_phy_reg_x550a,
3000};
3001
2425static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = { 3002static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = {
2426 IXGBE_MVALS_INIT(X550) 3003 IXGBE_MVALS_INIT(X550)
2427}; 3004};
@@ -2430,7 +3007,11 @@ static const u32 ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = {
2430 IXGBE_MVALS_INIT(X550EM_x) 3007 IXGBE_MVALS_INIT(X550EM_x)
2431}; 3008};
2432 3009
2433struct ixgbe_info ixgbe_X550_info = { 3010static const u32 ixgbe_mvals_x550em_a[IXGBE_MVALS_IDX_LIMIT] = {
3011 IXGBE_MVALS_INIT(X550EM_a)
3012};
3013
3014const struct ixgbe_info ixgbe_X550_info = {
2434 .mac = ixgbe_mac_X550, 3015 .mac = ixgbe_mac_X550,
2435 .get_invariants = &ixgbe_get_invariants_X540, 3016 .get_invariants = &ixgbe_get_invariants_X540,
2436 .mac_ops = &mac_ops_X550, 3017 .mac_ops = &mac_ops_X550,
@@ -2440,7 +3021,7 @@ struct ixgbe_info ixgbe_X550_info = {
2440 .mvals = ixgbe_mvals_X550, 3021 .mvals = ixgbe_mvals_X550,
2441}; 3022};
2442 3023
2443struct ixgbe_info ixgbe_X550EM_x_info = { 3024const struct ixgbe_info ixgbe_X550EM_x_info = {
2444 .mac = ixgbe_mac_X550EM_x, 3025 .mac = ixgbe_mac_X550EM_x,
2445 .get_invariants = &ixgbe_get_invariants_X550_x, 3026 .get_invariants = &ixgbe_get_invariants_X550_x,
2446 .mac_ops = &mac_ops_X550EM_x, 3027 .mac_ops = &mac_ops_X550EM_x,
@@ -2449,3 +3030,13 @@ struct ixgbe_info ixgbe_X550EM_x_info = {
2449 .mbx_ops = &mbx_ops_generic, 3030 .mbx_ops = &mbx_ops_generic,
2450 .mvals = ixgbe_mvals_X550EM_x, 3031 .mvals = ixgbe_mvals_X550EM_x,
2451}; 3032};
3033
3034const struct ixgbe_info ixgbe_x550em_a_info = {
3035 .mac = ixgbe_mac_x550em_a,
3036 .get_invariants = &ixgbe_get_invariants_X550_x,
3037 .mac_ops = &mac_ops_x550em_a,
3038 .eeprom_ops = &eeprom_ops_X550EM_x,
3039 .phy_ops = &phy_ops_x550em_a,
3040 .mbx_ops = &mbx_ops_generic,
3041 .mvals = ixgbe_mvals_x550em_a,
3042};
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index c48aef613b0a..d7aa4b203f40 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -680,7 +680,7 @@ static void ixgbevf_diag_test(struct net_device *netdev,
680 680
681 if (if_running) 681 if (if_running)
682 /* indicate we're in test mode */ 682 /* indicate we're in test mode */
683 dev_close(netdev); 683 ixgbevf_close(netdev);
684 else 684 else
685 ixgbevf_reset(adapter); 685 ixgbevf_reset(adapter);
686 686
@@ -692,7 +692,7 @@ static void ixgbevf_diag_test(struct net_device *netdev,
692 692
693 clear_bit(__IXGBEVF_TESTING, &adapter->state); 693 clear_bit(__IXGBEVF_TESTING, &adapter->state);
694 if (if_running) 694 if (if_running)
695 dev_open(netdev); 695 ixgbevf_open(netdev);
696 } else { 696 } else {
697 hw_dbg(&adapter->hw, "online testing starting\n"); 697 hw_dbg(&adapter->hw, "online testing starting\n");
698 /* Online tests */ 698 /* Online tests */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 68ec7daa04fd..5ac60eefb0cd 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -403,13 +403,6 @@ struct ixgbevf_adapter {
403 u32 alloc_rx_page_failed; 403 u32 alloc_rx_page_failed;
404 u32 alloc_rx_buff_failed; 404 u32 alloc_rx_buff_failed;
405 405
406 /* Some features need tri-state capability,
407 * thus the additional *_CAPABLE flags.
408 */
409 u32 flags;
410#define IXGBEVF_FLAG_RESET_REQUESTED (u32)(1)
411#define IXGBEVF_FLAG_QUEUE_RESET_REQUESTED (u32)(1 << 2)
412
413 struct msix_entry *msix_entries; 406 struct msix_entry *msix_entries;
414 407
415 /* OS defined structs */ 408 /* OS defined structs */
@@ -461,6 +454,8 @@ enum ixbgevf_state_t {
461 __IXGBEVF_REMOVING, 454 __IXGBEVF_REMOVING,
462 __IXGBEVF_SERVICE_SCHED, 455 __IXGBEVF_SERVICE_SCHED,
463 __IXGBEVF_SERVICE_INITED, 456 __IXGBEVF_SERVICE_INITED,
457 __IXGBEVF_RESET_REQUESTED,
458 __IXGBEVF_QUEUE_RESET_REQUESTED,
464}; 459};
465 460
466enum ixgbevf_boards { 461enum ixgbevf_boards {
@@ -486,6 +481,8 @@ extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
486extern const char ixgbevf_driver_name[]; 481extern const char ixgbevf_driver_name[];
487extern const char ixgbevf_driver_version[]; 482extern const char ixgbevf_driver_version[];
488 483
484int ixgbevf_open(struct net_device *netdev);
485int ixgbevf_close(struct net_device *netdev);
489void ixgbevf_up(struct ixgbevf_adapter *adapter); 486void ixgbevf_up(struct ixgbevf_adapter *adapter);
490void ixgbevf_down(struct ixgbevf_adapter *adapter); 487void ixgbevf_down(struct ixgbevf_adapter *adapter);
491void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter); 488void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
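Replacing the adapter->flags requests with bits in adapter->state (above) lets the service task consume a reset request with a single atomic test-and-clear instead of a non-atomic read-modify-write on a shared u32. Below is a rough userspace analogue of that pattern using C11 atomics; the bit names and helpers are illustrative, not the kernel's set_bit()/test_and_clear_bit().

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    enum { RESET_REQUESTED = 0, QUEUE_RESET_REQUESTED = 1 };

    static atomic_ulong state;

    static void set_bit_atomic(int nr)
    {
        atomic_fetch_or(&state, 1UL << nr);
    }

    /* Returns true exactly once per request, like test_and_clear_bit(). */
    static bool test_and_clear_bit_atomic(int nr)
    {
        unsigned long old = atomic_fetch_and(&state, ~(1UL << nr));

        return old & (1UL << nr);
    }

    int main(void)
    {
        set_bit_atomic(RESET_REQUESTED);    /* e.g. requested from a tx timeout */

        /* service task: only the first check sees the request */
        printf("first check:  %d\n", test_and_clear_bit_atomic(RESET_REQUESTED));
        printf("second check: %d\n", test_and_clear_bit_atomic(RESET_REQUESTED));
        return 0;
    }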
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 0ea14c0a2e74..007cbe094990 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -268,7 +268,7 @@ static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
268{ 268{
269 /* Do the reset outside of interrupt context */ 269 /* Do the reset outside of interrupt context */
270 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) { 270 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
271 adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED; 271 set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
272 ixgbevf_service_event_schedule(adapter); 272 ixgbevf_service_event_schedule(adapter);
273 } 273 }
274} 274}
@@ -288,9 +288,10 @@ static void ixgbevf_tx_timeout(struct net_device *netdev)
288 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes 288 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
289 * @q_vector: board private structure 289 * @q_vector: board private structure
290 * @tx_ring: tx ring to clean 290 * @tx_ring: tx ring to clean
291 * @napi_budget: Used to determine if we are in netpoll
291 **/ 292 **/
292static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, 293static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
293 struct ixgbevf_ring *tx_ring) 294 struct ixgbevf_ring *tx_ring, int napi_budget)
294{ 295{
295 struct ixgbevf_adapter *adapter = q_vector->adapter; 296 struct ixgbevf_adapter *adapter = q_vector->adapter;
296 struct ixgbevf_tx_buffer *tx_buffer; 297 struct ixgbevf_tx_buffer *tx_buffer;
@@ -328,7 +329,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
328 total_packets += tx_buffer->gso_segs; 329 total_packets += tx_buffer->gso_segs;
329 330
330 /* free the skb */ 331 /* free the skb */
331 dev_kfree_skb_any(tx_buffer->skb); 332 napi_consume_skb(tx_buffer->skb, napi_budget);
332 333
333 /* unmap skb header data */ 334 /* unmap skb header data */
334 dma_unmap_single(tx_ring->dev, 335 dma_unmap_single(tx_ring->dev,
@@ -1013,8 +1014,10 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
1013 int per_ring_budget, work_done = 0; 1014 int per_ring_budget, work_done = 0;
1014 bool clean_complete = true; 1015 bool clean_complete = true;
1015 1016
1016 ixgbevf_for_each_ring(ring, q_vector->tx) 1017 ixgbevf_for_each_ring(ring, q_vector->tx) {
1017 clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring); 1018 if (!ixgbevf_clean_tx_irq(q_vector, ring, budget))
1019 clean_complete = false;
1020 }
1018 1021
1019 if (budget <= 0) 1022 if (budget <= 0)
1020 return budget; 1023 return budget;
@@ -1035,7 +1038,8 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
1035 int cleaned = ixgbevf_clean_rx_irq(q_vector, ring, 1038 int cleaned = ixgbevf_clean_rx_irq(q_vector, ring,
1036 per_ring_budget); 1039 per_ring_budget);
1037 work_done += cleaned; 1040 work_done += cleaned;
1038 clean_complete &= (cleaned < per_ring_budget); 1041 if (cleaned >= per_ring_budget)
1042 clean_complete = false;
1039 } 1043 }
1040 1044
1041#ifdef CONFIG_NET_RX_BUSY_POLL 1045#ifdef CONFIG_NET_RX_BUSY_POLL
@@ -1984,7 +1988,7 @@ static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
1984 hw->mbx.timeout = 0; 1988 hw->mbx.timeout = 0;
1985 1989
1986 /* wait for watchdog to come around and bail us out */ 1990 /* wait for watchdog to come around and bail us out */
1987 adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED; 1991 set_bit(__IXGBEVF_QUEUE_RESET_REQUESTED, &adapter->state);
1988 } 1992 }
1989 1993
1990 return 0; 1994 return 0;
@@ -2749,11 +2753,9 @@ static void ixgbevf_service_timer(unsigned long data)
2749 2753
2750static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter) 2754static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
2751{ 2755{
2752 if (!(adapter->flags & IXGBEVF_FLAG_RESET_REQUESTED)) 2756 if (!test_and_clear_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state))
2753 return; 2757 return;
2754 2758
2755 adapter->flags &= ~IXGBEVF_FLAG_RESET_REQUESTED;
2756
2757 /* If we're already down or resetting, just bail */ 2759 /* If we're already down or resetting, just bail */
2758 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || 2760 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2759 test_bit(__IXGBEVF_RESETTING, &adapter->state)) 2761 test_bit(__IXGBEVF_RESETTING, &adapter->state))
@@ -2821,7 +2823,7 @@ static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter)
2821 2823
2822 /* if check for link returns error we will need to reset */ 2824 /* if check for link returns error we will need to reset */
2823 if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) { 2825 if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) {
2824 adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED; 2826 set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
2825 link_up = false; 2827 link_up = false;
2826 } 2828 }
2827 2829
@@ -3122,7 +3124,7 @@ static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
3122 * handler is registered with the OS, the watchdog timer is started, 3124 * handler is registered with the OS, the watchdog timer is started,
3123 * and the stack is notified that the interface is ready. 3125 * and the stack is notified that the interface is ready.
3124 **/ 3126 **/
3125static int ixgbevf_open(struct net_device *netdev) 3127int ixgbevf_open(struct net_device *netdev)
3126{ 3128{
3127 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3129 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3128 struct ixgbe_hw *hw = &adapter->hw; 3130 struct ixgbe_hw *hw = &adapter->hw;
@@ -3205,7 +3207,7 @@ err_setup_reset:
3205 * needs to be disabled. A global MAC reset is issued to stop the 3207 * needs to be disabled. A global MAC reset is issued to stop the
3206 * hardware, and all transmit and receive resources are freed. 3208 * hardware, and all transmit and receive resources are freed.
3207 **/ 3209 **/
3208static int ixgbevf_close(struct net_device *netdev) 3210int ixgbevf_close(struct net_device *netdev)
3209{ 3211{
3210 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3212 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3211 3213
@@ -3222,11 +3224,10 @@ static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
3222{ 3224{
3223 struct net_device *dev = adapter->netdev; 3225 struct net_device *dev = adapter->netdev;
3224 3226
3225 if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED)) 3227 if (!test_and_clear_bit(__IXGBEVF_QUEUE_RESET_REQUESTED,
3228 &adapter->state))
3226 return; 3229 return;
3227 3230
3228 adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
3229
3230 /* if interface is down do nothing */ 3231 /* if interface is down do nothing */
3231 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || 3232 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3232 test_bit(__IXGBEVF_RESETTING, &adapter->state)) 3233 test_bit(__IXGBEVF_RESETTING, &adapter->state))
@@ -3337,76 +3338,55 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
3337 return 1; 3338 return 1;
3338} 3339}
3339 3340
3341static inline bool ixgbevf_ipv6_csum_is_sctp(struct sk_buff *skb)
3342{
3343 unsigned int offset = 0;
3344
3345 ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
3346
3347 return offset == skb_checksum_start_offset(skb);
3348}
3349
3340static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, 3350static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
3341 struct ixgbevf_tx_buffer *first) 3351 struct ixgbevf_tx_buffer *first)
3342{ 3352{
3343 struct sk_buff *skb = first->skb; 3353 struct sk_buff *skb = first->skb;
3344 u32 vlan_macip_lens = 0; 3354 u32 vlan_macip_lens = 0;
3345 u32 mss_l4len_idx = 0;
3346 u32 type_tucmd = 0; 3355 u32 type_tucmd = 0;
3347 3356
3348 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3357 if (skb->ip_summed != CHECKSUM_PARTIAL)
3349 u8 l4_hdr = 0; 3358 goto no_csum;
3350 __be16 frag_off;
3351
3352 switch (first->protocol) {
3353 case htons(ETH_P_IP):
3354 vlan_macip_lens |= skb_network_header_len(skb);
3355 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
3356 l4_hdr = ip_hdr(skb)->protocol;
3357 break;
3358 case htons(ETH_P_IPV6):
3359 vlan_macip_lens |= skb_network_header_len(skb);
3360 l4_hdr = ipv6_hdr(skb)->nexthdr;
3361 if (likely(skb_network_header_len(skb) ==
3362 sizeof(struct ipv6hdr)))
3363 break;
3364 ipv6_skip_exthdr(skb, skb_network_offset(skb) +
3365 sizeof(struct ipv6hdr),
3366 &l4_hdr, &frag_off);
3367 if (unlikely(frag_off))
3368 l4_hdr = NEXTHDR_FRAGMENT;
3369 break;
3370 default:
3371 break;
3372 }
3373 3359
3374 switch (l4_hdr) { 3360 switch (skb->csum_offset) {
3375 case IPPROTO_TCP: 3361 case offsetof(struct tcphdr, check):
3376 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP; 3362 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
3377 mss_l4len_idx = tcp_hdrlen(skb) << 3363 /* fall through */
3378 IXGBE_ADVTXD_L4LEN_SHIFT; 3364 case offsetof(struct udphdr, check):
3379 break; 3365 break;
3380 case IPPROTO_SCTP: 3366 case offsetof(struct sctphdr, checksum):
3381 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP; 3367 /* validate that this is actually an SCTP request */
3382 mss_l4len_idx = sizeof(struct sctphdr) << 3368 if (((first->protocol == htons(ETH_P_IP)) &&
3383 IXGBE_ADVTXD_L4LEN_SHIFT; 3369 (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
3384 break; 3370 ((first->protocol == htons(ETH_P_IPV6)) &&
3385 case IPPROTO_UDP: 3371 ixgbevf_ipv6_csum_is_sctp(skb))) {
3386 mss_l4len_idx = sizeof(struct udphdr) << 3372 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
3387 IXGBE_ADVTXD_L4LEN_SHIFT;
3388 break; 3373 break;
3389 default:
3390 if (unlikely(net_ratelimit())) {
3391 dev_warn(tx_ring->dev,
3392 "partial checksum, l3 proto=%x, l4 proto=%x\n",
3393 first->protocol, l4_hdr);
3394 }
3395 skb_checksum_help(skb);
3396 goto no_csum;
3397 } 3374 }
3398 3375 /* fall through */
3399 /* update TX checksum flag */ 3376 default:
3400 first->tx_flags |= IXGBE_TX_FLAGS_CSUM; 3377 skb_checksum_help(skb);
3378 goto no_csum;
3401 } 3379 }
3402 3380 /* update TX checksum flag */
3381 first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
3382 vlan_macip_lens = skb_checksum_start_offset(skb) -
3383 skb_network_offset(skb);
3403no_csum: 3384no_csum:
3404 /* vlan_macip_lens: MACLEN, VLAN tag */ 3385 /* vlan_macip_lens: MACLEN, VLAN tag */
3405 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; 3386 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
3406 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; 3387 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
3407 3388
3408 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, 3389 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0);
3409 type_tucmd, mss_l4len_idx);
3410} 3390}
3411 3391
3412static __le32 ixgbevf_tx_cmd_type(u32 tx_flags) 3392static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
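
With the move to NETIF_F_HW_CSUM the transmit checksum path no longer parses the L3/L4 headers; it keys off skb->csum_offset, only validates the SCTP case, and falls back to skb_checksum_help() for anything else. A minimal sketch of that dispatch, using a hypothetical helper name:

#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>

static bool demo_hw_csum_supported(const struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return false;

	switch (skb->csum_offset) {
	case offsetof(struct tcphdr, check):	/* TCP */
	case offsetof(struct udphdr, check):	/* UDP */
		return true;
	case offsetof(struct sctphdr, checksum):
		return true;			/* SCTP CRC32c, if advertised */
	default:
		return false;			/* caller uses skb_checksum_help() */
	}
}
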
@@ -3692,19 +3672,23 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
3692 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3672 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3693 struct ixgbe_hw *hw = &adapter->hw; 3673 struct ixgbe_hw *hw = &adapter->hw;
3694 struct sockaddr *addr = p; 3674 struct sockaddr *addr = p;
3675 int err;
3695 3676
3696 if (!is_valid_ether_addr(addr->sa_data)) 3677 if (!is_valid_ether_addr(addr->sa_data))
3697 return -EADDRNOTAVAIL; 3678 return -EADDRNOTAVAIL;
3698 3679
3699 ether_addr_copy(netdev->dev_addr, addr->sa_data);
3700 ether_addr_copy(hw->mac.addr, addr->sa_data);
3701
3702 spin_lock_bh(&adapter->mbx_lock); 3680 spin_lock_bh(&adapter->mbx_lock);
3703 3681
3704 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); 3682 err = hw->mac.ops.set_rar(hw, 0, addr->sa_data, 0);
3705 3683
3706 spin_unlock_bh(&adapter->mbx_lock); 3684 spin_unlock_bh(&adapter->mbx_lock);
3707 3685
3686 if (err)
3687 return -EPERM;
3688
3689 ether_addr_copy(hw->mac.addr, addr->sa_data);
3690 ether_addr_copy(netdev->dev_addr, addr->sa_data);
3691
3708 return 0; 3692 return 0;
3709} 3693}
3710 3694
@@ -4009,22 +3993,25 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4009 } 3993 }
4010 3994
4011 netdev->hw_features = NETIF_F_SG | 3995 netdev->hw_features = NETIF_F_SG |
4012 NETIF_F_IP_CSUM |
4013 NETIF_F_IPV6_CSUM |
4014 NETIF_F_TSO | 3996 NETIF_F_TSO |
4015 NETIF_F_TSO6 | 3997 NETIF_F_TSO6 |
4016 NETIF_F_RXCSUM; 3998 NETIF_F_RXCSUM |
3999 NETIF_F_HW_CSUM |
4000 NETIF_F_SCTP_CRC;
4017 4001
4018 netdev->features = netdev->hw_features | 4002 netdev->features = netdev->hw_features |
4019 NETIF_F_HW_VLAN_CTAG_TX | 4003 NETIF_F_HW_VLAN_CTAG_TX |
4020 NETIF_F_HW_VLAN_CTAG_RX | 4004 NETIF_F_HW_VLAN_CTAG_RX |
4021 NETIF_F_HW_VLAN_CTAG_FILTER; 4005 NETIF_F_HW_VLAN_CTAG_FILTER;
4022 4006
4023 netdev->vlan_features |= NETIF_F_TSO | 4007 netdev->vlan_features |= NETIF_F_SG |
4008 NETIF_F_TSO |
4024 NETIF_F_TSO6 | 4009 NETIF_F_TSO6 |
4025 NETIF_F_IP_CSUM | 4010 NETIF_F_HW_CSUM |
4026 NETIF_F_IPV6_CSUM | 4011 NETIF_F_SCTP_CRC;
4027 NETIF_F_SG; 4012
4013 netdev->mpls_features |= NETIF_F_HW_CSUM;
4014 netdev->hw_enc_features |= NETIF_F_HW_CSUM;
4028 4015
4029 if (pci_using_dac) 4016 if (pci_using_dac)
4030 netdev->features |= NETIF_F_HIGHDMA; 4017 netdev->features |= NETIF_F_HIGHDMA;
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 61a98f4c5746..4d613a4f2a7f 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -408,8 +408,10 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
408 408
409 /* if nacked the address was rejected, use "perm_addr" */ 409 /* if nacked the address was rejected, use "perm_addr" */
410 if (!ret_val && 410 if (!ret_val &&
411 (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) 411 (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) {
412 ixgbevf_get_mac_addr_vf(hw, hw->mac.addr); 412 ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
413 return IXGBE_ERR_MBX;
414 }
413 415
414 return ret_val; 416 return ret_val;
415} 417}
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 577f7ca7deba..7fc490225da5 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -260,7 +260,6 @@
260 260
261#define MVNETA_VLAN_TAG_LEN 4 261#define MVNETA_VLAN_TAG_LEN 4
262 262
263#define MVNETA_CPU_D_CACHE_LINE_SIZE 32
264#define MVNETA_TX_CSUM_DEF_SIZE 1600 263#define MVNETA_TX_CSUM_DEF_SIZE 1600
265#define MVNETA_TX_CSUM_MAX_SIZE 9800 264#define MVNETA_TX_CSUM_MAX_SIZE 9800
266#define MVNETA_ACC_MODE_EXT1 1 265#define MVNETA_ACC_MODE_EXT1 1
@@ -300,7 +299,7 @@
300#define MVNETA_RX_PKT_SIZE(mtu) \ 299#define MVNETA_RX_PKT_SIZE(mtu) \
301 ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \ 300 ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
302 ETH_HLEN + ETH_FCS_LEN, \ 301 ETH_HLEN + ETH_FCS_LEN, \
303 MVNETA_CPU_D_CACHE_LINE_SIZE) 302 cache_line_size())
304 303
305#define IS_TSO_HEADER(txq, addr) \ 304#define IS_TSO_HEADER(txq, addr) \
306 ((addr >= txq->tso_hdrs_phys) && \ 305 ((addr >= txq->tso_hdrs_phys) && \
@@ -2764,9 +2763,6 @@ static int mvneta_rxq_init(struct mvneta_port *pp,
2764 if (rxq->descs == NULL) 2763 if (rxq->descs == NULL)
2765 return -ENOMEM; 2764 return -ENOMEM;
2766 2765
2767 BUG_ON(rxq->descs !=
2768 PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
2769
2770 rxq->last_desc = rxq->size - 1; 2766 rxq->last_desc = rxq->size - 1;
2771 2767
2772 /* Set Rx descriptors queue starting address */ 2768 /* Set Rx descriptors queue starting address */
@@ -2837,10 +2833,6 @@ static int mvneta_txq_init(struct mvneta_port *pp,
2837 if (txq->descs == NULL) 2833 if (txq->descs == NULL)
2838 return -ENOMEM; 2834 return -ENOMEM;
2839 2835
2840 /* Make sure descriptor address is cache line size aligned */
2841 BUG_ON(txq->descs !=
2842 PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
2843
2844 txq->last_desc = txq->size - 1; 2836 txq->last_desc = txq->size - 1;
2845 2837
2846 /* Set maximum bandwidth for enabled TXQs */ 2838 /* Set maximum bandwidth for enabled TXQs */
@@ -3050,6 +3042,20 @@ static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
3050 return mtu; 3042 return mtu;
3051} 3043}
3052 3044
3045static void mvneta_percpu_enable(void *arg)
3046{
3047 struct mvneta_port *pp = arg;
3048
3049 enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
3050}
3051
3052static void mvneta_percpu_disable(void *arg)
3053{
3054 struct mvneta_port *pp = arg;
3055
3056 disable_percpu_irq(pp->dev->irq);
3057}
3058
3053/* Change the device mtu */ 3059/* Change the device mtu */
3054static int mvneta_change_mtu(struct net_device *dev, int mtu) 3060static int mvneta_change_mtu(struct net_device *dev, int mtu)
3055{ 3061{
@@ -3074,6 +3080,7 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
3074 * reallocation of the queues 3080 * reallocation of the queues
3075 */ 3081 */
3076 mvneta_stop_dev(pp); 3082 mvneta_stop_dev(pp);
3083 on_each_cpu(mvneta_percpu_disable, pp, true);
3077 3084
3078 mvneta_cleanup_txqs(pp); 3085 mvneta_cleanup_txqs(pp);
3079 mvneta_cleanup_rxqs(pp); 3086 mvneta_cleanup_rxqs(pp);
@@ -3097,6 +3104,7 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
3097 return ret; 3104 return ret;
3098 } 3105 }
3099 3106
3107 on_each_cpu(mvneta_percpu_enable, pp, true);
3100 mvneta_start_dev(pp); 3108 mvneta_start_dev(pp);
3101 mvneta_port_up(pp); 3109 mvneta_port_up(pp);
3102 3110
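
The per-CPU RX/TX interrupts are now quiesced on every CPU around the queue teardown and reallocation that an MTU change triggers, and re-enabled once the queues exist again. A sketch of the on_each_cpu() pattern being used here (demo_* names are illustrative):

#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

static void demo_percpu_irq_off(void *arg)
{
	disable_percpu_irq(*(unsigned int *)arg);
}

static void demo_percpu_irq_on(void *arg)
{
	enable_percpu_irq(*(unsigned int *)arg, IRQ_TYPE_NONE);
}

static void demo_reconfigure(unsigned int irq)
{
	on_each_cpu(demo_percpu_irq_off, &irq, true);	/* wait for all CPUs */
	/* ... tear down and re-create the queues here ... */
	on_each_cpu(demo_percpu_irq_on, &irq, true);
}
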
@@ -3250,20 +3258,6 @@ static void mvneta_mdio_remove(struct mvneta_port *pp)
3250 pp->phy_dev = NULL; 3258 pp->phy_dev = NULL;
3251} 3259}
3252 3260
3253static void mvneta_percpu_enable(void *arg)
3254{
3255 struct mvneta_port *pp = arg;
3256
3257 enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
3258}
3259
3260static void mvneta_percpu_disable(void *arg)
3261{
3262 struct mvneta_port *pp = arg;
3263
3264 disable_percpu_irq(pp->dev->irq);
3265}
3266
3267/* Electing a CPU must be done in an atomic way: it should be done 3261/* Electing a CPU must be done in an atomic way: it should be done
3268 * after or before the removal/insertion of a CPU and this function is 3262 * after or before the removal/insertion of a CPU and this function is
3269 * not reentrant. 3263 * not reentrant.
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index c797971aefab..868a957f24bb 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -321,7 +321,6 @@
321/* Lbtd 802.3 type */ 321/* Lbtd 802.3 type */
322#define MVPP2_IP_LBDT_TYPE 0xfffa 322#define MVPP2_IP_LBDT_TYPE 0xfffa
323 323
324#define MVPP2_CPU_D_CACHE_LINE_SIZE 32
325#define MVPP2_TX_CSUM_MAX_SIZE 9800 324#define MVPP2_TX_CSUM_MAX_SIZE 9800
326 325
327/* Timeout constants */ 326/* Timeout constants */
@@ -377,7 +376,7 @@
377 376
378#define MVPP2_RX_PKT_SIZE(mtu) \ 377#define MVPP2_RX_PKT_SIZE(mtu) \
379 ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \ 378 ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
380 ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE) 379 ETH_HLEN + ETH_FCS_LEN, cache_line_size())
381 380
382#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD) 381#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
383#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE) 382#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
@@ -4493,10 +4492,6 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
4493 if (!aggr_txq->descs) 4492 if (!aggr_txq->descs)
4494 return -ENOMEM; 4493 return -ENOMEM;
4495 4494
4496 /* Make sure descriptor address is cache line size aligned */
4497 BUG_ON(aggr_txq->descs !=
4498 PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
4499
4500 aggr_txq->last_desc = aggr_txq->size - 1; 4495 aggr_txq->last_desc = aggr_txq->size - 1;
4501 4496
4502 /* Aggr TXQ no reset WA */ 4497 /* Aggr TXQ no reset WA */
@@ -4526,9 +4521,6 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
4526 if (!rxq->descs) 4521 if (!rxq->descs)
4527 return -ENOMEM; 4522 return -ENOMEM;
4528 4523
4529 BUG_ON(rxq->descs !=
4530 PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
4531
4532 rxq->last_desc = rxq->size - 1; 4524 rxq->last_desc = rxq->size - 1;
4533 4525
4534 /* Zero occupied and non-occupied counters - direct access */ 4526 /* Zero occupied and non-occupied counters - direct access */
@@ -4616,10 +4608,6 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
4616 if (!txq->descs) 4608 if (!txq->descs)
4617 return -ENOMEM; 4609 return -ENOMEM;
4618 4610
4619 /* Make sure descriptor address is cache line size aligned */
4620 BUG_ON(txq->descs !=
4621 PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
4622
4623 txq->last_desc = txq->size - 1; 4611 txq->last_desc = txq->size - 1;
4624 4612
4625 /* Set Tx descriptors queue starting address - indirect access */ 4613 /* Set Tx descriptors queue starting address - indirect access */
@@ -6059,8 +6047,10 @@ static int mvpp2_port_init(struct mvpp2_port *port)
6059 6047
6060 /* Map physical Rx queue to port's logical Rx queue */ 6048 /* Map physical Rx queue to port's logical Rx queue */
6061 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL); 6049 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
6062 if (!rxq) 6050 if (!rxq) {
6051 err = -ENOMEM;
6063 goto err_free_percpu; 6052 goto err_free_percpu;
6053 }
6064 /* Map this Rx queue to a physical queue */ 6054 /* Map this Rx queue to a physical queue */
6065 rxq->id = port->first_rxq + queue; 6055 rxq->id = port->first_rxq + queue;
6066 rxq->port = port->id; 6056 rxq->port = port->id;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 7f2126b6a179..e0b68afea56e 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1690,8 +1690,8 @@ static int mtk_probe(struct platform_device *pdev)
1690 return -ENOMEM; 1690 return -ENOMEM;
1691 1691
1692 eth->base = devm_ioremap_resource(&pdev->dev, res); 1692 eth->base = devm_ioremap_resource(&pdev->dev, res);
1693 if (!eth->base) 1693 if (IS_ERR(eth->base))
1694 return -EADDRNOTAVAIL; 1694 return PTR_ERR(eth->base);
1695 1695
1696 spin_lock_init(&eth->page_lock); 1696 spin_lock_init(&eth->page_lock);
1697 1697
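
devm_ioremap_resource() never returns NULL; failures come back as ERR_PTR() values, which is why the NULL test had to be replaced with IS_ERR()/PTR_ERR(). The standard probe idiom looks like this (demo_probe is illustrative):

#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>

static int demo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* propagate the real error code */

	return 0;
}
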
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 42d8de892bfe..6aa73972d478 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -39,8 +39,6 @@
39 39
40#include "mlx4.h" 40#include "mlx4.h"
41 41
42static const u8 zero_gid[16]; /* automatically initialized to 0 */
43
44int mlx4_get_mgm_entry_size(struct mlx4_dev *dev) 42int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
45{ 43{
46 return 1 << dev->oper_log_mgm_entry_size; 44 return 1 << dev->oper_log_mgm_entry_size;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 97f5114fc113..eb926e1ee71c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -407,6 +407,12 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
407const char *mlx5_command_str(int command) 407const char *mlx5_command_str(int command)
408{ 408{
409 switch (command) { 409 switch (command) {
410 case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
411 return "QUERY_HCA_VPORT_CONTEXT";
412
413 case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
414 return "MODIFY_HCA_VPORT_CONTEXT";
415
410 case MLX5_CMD_OP_QUERY_HCA_CAP: 416 case MLX5_CMD_OP_QUERY_HCA_CAP:
411 return "QUERY_HCA_CAP"; 417 return "QUERY_HCA_CAP";
412 418
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index aa1ab4702385..75c7ae6a5cc4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -98,88 +98,55 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
98{ 98{
99 int err; 99 int err;
100 100
101 err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR); 101 err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
102 if (err)
103 return err;
104
105 err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX);
106 if (err) 102 if (err)
107 return err; 103 return err;
108 104
109 if (MLX5_CAP_GEN(dev, eth_net_offloads)) { 105 if (MLX5_CAP_GEN(dev, eth_net_offloads)) {
110 err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS, 106 err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS);
111 HCA_CAP_OPMOD_GET_CUR);
112 if (err)
113 return err;
114 err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS,
115 HCA_CAP_OPMOD_GET_MAX);
116 if (err) 107 if (err)
117 return err; 108 return err;
118 } 109 }
119 110
120 if (MLX5_CAP_GEN(dev, pg)) { 111 if (MLX5_CAP_GEN(dev, pg)) {
121 err = mlx5_core_get_caps(dev, MLX5_CAP_ODP, 112 err = mlx5_core_get_caps(dev, MLX5_CAP_ODP);
122 HCA_CAP_OPMOD_GET_CUR);
123 if (err)
124 return err;
125 err = mlx5_core_get_caps(dev, MLX5_CAP_ODP,
126 HCA_CAP_OPMOD_GET_MAX);
127 if (err) 113 if (err)
128 return err; 114 return err;
129 } 115 }
130 116
131 if (MLX5_CAP_GEN(dev, atomic)) { 117 if (MLX5_CAP_GEN(dev, atomic)) {
132 err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC, 118 err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
133 HCA_CAP_OPMOD_GET_CUR);
134 if (err)
135 return err;
136 err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
137 HCA_CAP_OPMOD_GET_MAX);
138 if (err) 119 if (err)
139 return err; 120 return err;
140 } 121 }
141 122
142 if (MLX5_CAP_GEN(dev, roce)) { 123 if (MLX5_CAP_GEN(dev, roce)) {
143 err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE, 124 err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE);
144 HCA_CAP_OPMOD_GET_CUR);
145 if (err)
146 return err;
147 err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE,
148 HCA_CAP_OPMOD_GET_MAX);
149 if (err) 125 if (err)
150 return err; 126 return err;
151 } 127 }
152 128
153 if (MLX5_CAP_GEN(dev, nic_flow_table)) { 129 if (MLX5_CAP_GEN(dev, nic_flow_table)) {
154 err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE, 130 err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE);
155 HCA_CAP_OPMOD_GET_CUR);
156 if (err)
157 return err;
158 err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE,
159 HCA_CAP_OPMOD_GET_MAX);
160 if (err) 131 if (err)
161 return err; 132 return err;
162 } 133 }
163 134
164 if (MLX5_CAP_GEN(dev, vport_group_manager) && 135 if (MLX5_CAP_GEN(dev, vport_group_manager) &&
165 MLX5_CAP_GEN(dev, eswitch_flow_table)) { 136 MLX5_CAP_GEN(dev, eswitch_flow_table)) {
166 err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE, 137 err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE);
167 HCA_CAP_OPMOD_GET_CUR);
168 if (err)
169 return err;
170 err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE,
171 HCA_CAP_OPMOD_GET_MAX);
172 if (err) 138 if (err)
173 return err; 139 return err;
174 } 140 }
175 141
176 if (MLX5_CAP_GEN(dev, eswitch_flow_table)) { 142 if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
177 err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH, 143 err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH);
178 HCA_CAP_OPMOD_GET_CUR);
179 if (err) 144 if (err)
180 return err; 145 return err;
181 err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH, 146 }
182 HCA_CAP_OPMOD_GET_MAX); 147
148 if (MLX5_CAP_GEN(dev, vector_calc)) {
149 err = mlx5_core_get_caps(dev, MLX5_CAP_VECTOR_CALC);
183 if (err) 150 if (err)
184 return err; 151 return err;
185 } 152 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 72a94e72ee25..3f3b2fae4991 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -341,8 +341,9 @@ static u16 to_fw_pkey_sz(u32 size)
341 } 341 }
342} 342}
343 343
344int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type, 344static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
345 enum mlx5_cap_mode cap_mode) 345 enum mlx5_cap_type cap_type,
346 enum mlx5_cap_mode cap_mode)
346{ 347{
347 u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)]; 348 u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
348 int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); 349 int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
@@ -392,6 +393,16 @@ query_ex:
392 return err; 393 return err;
393} 394}
394 395
396int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type)
397{
398 int ret;
399
400 ret = mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_CUR);
401 if (ret)
402 return ret;
403 return mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_MAX);
404}
405
395static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz, int opmod) 406static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz, int opmod)
396{ 407{
397 u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)]; 408 u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)];
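
mlx5_core_get_caps() now hides the CUR/MAX distinction: the wrapper issues both QUERY_HCA_CAP invocations, so callers only name the capability type and then read fields through the MLX5_CAP_* accessors. A hedged sketch of a caller (MLX5_CAP_ETH() and the csum_cap field are assumed from the mlx5 headers, not taken from this diff):

#include <linux/errno.h>

static int demo_check_eth_csum(struct mlx5_core_dev *dev)
{
	int err;

	if (!MLX5_CAP_GEN(dev, eth_net_offloads))
		return -EOPNOTSUPP;

	/* Fills both the current and the maximum capability arrays. */
	err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS);
	if (err)
		return err;

	return MLX5_CAP_ETH(dev, csum_cap) ? 0 : -EOPNOTSUPP;
}
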
@@ -419,8 +430,7 @@ static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
419 int err; 430 int err;
420 431
421 if (MLX5_CAP_GEN(dev, atomic)) { 432 if (MLX5_CAP_GEN(dev, atomic)) {
422 err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC, 433 err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
423 HCA_CAP_OPMOD_GET_CUR);
424 if (err) 434 if (err)
425 return err; 435 return err;
426 } else { 436 } else {
@@ -462,11 +472,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
462 if (!set_ctx) 472 if (!set_ctx)
463 goto query_ex; 473 goto query_ex;
464 474
465 err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX); 475 err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
466 if (err)
467 goto query_ex;
468
469 err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR);
470 if (err) 476 if (err)
471 goto query_ex; 477 goto query_ex;
472 478
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 90ab09e375b8..bd518405859e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -852,7 +852,8 @@ int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
852EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce); 852EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);
853 853
854int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport, 854int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
855 u8 port_num, void *out, size_t out_sz) 855 int vf, u8 port_num, void *out,
856 size_t out_sz)
856{ 857{
857 int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in); 858 int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
858 int is_group_manager; 859 int is_group_manager;
@@ -871,7 +872,7 @@ int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
871 if (other_vport) { 872 if (other_vport) {
872 if (is_group_manager) { 873 if (is_group_manager) {
873 MLX5_SET(query_vport_counter_in, in, other_vport, 1); 874 MLX5_SET(query_vport_counter_in, in, other_vport, 1);
874 MLX5_SET(query_vport_counter_in, in, vport_number, 0); 875 MLX5_SET(query_vport_counter_in, in, vport_number, vf + 1);
875 } else { 876 } else {
876 err = -EPERM; 877 err = -EPERM;
877 goto free; 878 goto free;
@@ -890,3 +891,70 @@ free:
890 return err; 891 return err;
891} 892}
892EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter); 893EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter);
894
895int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
896 u8 other_vport, u8 port_num,
897 int vf,
898 struct mlx5_hca_vport_context *req)
899{
900 int in_sz = MLX5_ST_SZ_BYTES(modify_hca_vport_context_in);
901 u8 out[MLX5_ST_SZ_BYTES(modify_hca_vport_context_out)];
902 int is_group_manager;
903 void *in;
904 int err;
905 void *ctx;
906
907 mlx5_core_dbg(dev, "vf %d\n", vf);
908 is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
909 in = kzalloc(in_sz, GFP_KERNEL);
910 if (!in)
911 return -ENOMEM;
912
913 memset(out, 0, sizeof(out));
914 MLX5_SET(modify_hca_vport_context_in, in, opcode, MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT);
915 if (other_vport) {
916 if (is_group_manager) {
917 MLX5_SET(modify_hca_vport_context_in, in, other_vport, 1);
918 MLX5_SET(modify_hca_vport_context_in, in, vport_number, vf);
919 } else {
920 err = -EPERM;
921 goto ex;
922 }
923 }
924
925 if (MLX5_CAP_GEN(dev, num_ports) > 1)
926 MLX5_SET(modify_hca_vport_context_in, in, port_num, port_num);
927
928 ctx = MLX5_ADDR_OF(modify_hca_vport_context_in, in, hca_vport_context);
929 MLX5_SET(hca_vport_context, ctx, field_select, req->field_select);
930 MLX5_SET(hca_vport_context, ctx, sm_virt_aware, req->sm_virt_aware);
931 MLX5_SET(hca_vport_context, ctx, has_smi, req->has_smi);
932 MLX5_SET(hca_vport_context, ctx, has_raw, req->has_raw);
933 MLX5_SET(hca_vport_context, ctx, vport_state_policy, req->policy);
934 MLX5_SET(hca_vport_context, ctx, port_physical_state, req->phys_state);
935 MLX5_SET(hca_vport_context, ctx, vport_state, req->vport_state);
936 MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid);
937 MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid);
938 MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1);
939 MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select, req->cap_mask1_perm);
940 MLX5_SET(hca_vport_context, ctx, cap_mask2, req->cap_mask2);
941 MLX5_SET(hca_vport_context, ctx, cap_mask2_field_select, req->cap_mask2_perm);
942 MLX5_SET(hca_vport_context, ctx, lid, req->lid);
943 MLX5_SET(hca_vport_context, ctx, init_type_reply, req->init_type_reply);
944 MLX5_SET(hca_vport_context, ctx, lmc, req->lmc);
945 MLX5_SET(hca_vport_context, ctx, subnet_timeout, req->subnet_timeout);
946 MLX5_SET(hca_vport_context, ctx, sm_lid, req->sm_lid);
947 MLX5_SET(hca_vport_context, ctx, sm_sl, req->sm_sl);
948 MLX5_SET(hca_vport_context, ctx, qkey_violation_counter, req->qkey_violation_counter);
949 MLX5_SET(hca_vport_context, ctx, pkey_violation_counter, req->pkey_violation_counter);
950 err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
951 if (err)
952 goto ex;
953
954 err = mlx5_cmd_status_to_err_v2(out);
955
956ex:
957 kfree(in);
958 return err;
959}
960EXPORT_SYMBOL_GPL(mlx5_core_modify_hca_vport_context);
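
mlx5_core_query_vport_counter() gains a vf argument so a group manager can read another function's counters; internally it selects vport_number vf + 1. A minimal caller sketch under that signature (output parsing is left out rather than guessing field names):

static int demo_query_vf_counters(struct mlx5_core_dev *dev, int vf,
				  void *out, size_t out_sz)
{
	/* other_vport = 1: query on behalf of the VF, on port 1 */
	return mlx5_core_query_vport_counter(dev, 1, vf, 1, out, out_sz);
}
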
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index 2ad7f67854d5..5989f7cb5462 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -50,3 +50,11 @@ config MLXSW_SPECTRUM
50 50
51 To compile this driver as a module, choose M here: the 51 To compile this driver as a module, choose M here: the
52 module will be called mlxsw_spectrum. 52 module will be called mlxsw_spectrum.
53
54config MLXSW_SPECTRUM_DCB
55 bool "Data Center Bridging (DCB) support"
56 depends on MLXSW_SPECTRUM && DCB
57 default y
58 ---help---
59 Say Y here if you want to use Data Center Bridging (DCB) in the
60 driver.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 584cac444852..9b5ebf84c051 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -8,3 +8,4 @@ mlxsw_switchx2-objs := switchx2.o
8obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o 8obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o
9mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ 9mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
10 spectrum_switchdev.o 10 spectrum_switchdev.o
11mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index f69f6280519f..3958195526d1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -114,6 +114,12 @@ struct mlxsw_core {
114 /* driver_priv has to be always the last item */ 114 /* driver_priv has to be always the last item */
115}; 115};
116 116
117void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
118{
119 return mlxsw_core->driver_priv;
120}
121EXPORT_SYMBOL(mlxsw_core_driver_priv);
122
117struct mlxsw_rx_listener_item { 123struct mlxsw_rx_listener_item {
118 struct list_head list; 124 struct list_head list;
119 struct mlxsw_rx_listener rxl; 125 struct mlxsw_rx_listener rxl;
@@ -381,7 +387,7 @@ static int __mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
381 387
382 mlxsw_core->emad.trans_active = true; 388 mlxsw_core->emad.trans_active = true;
383 389
384 err = mlxsw_core_skb_transmit(mlxsw_core->driver_priv, skb, tx_info); 390 err = mlxsw_core_skb_transmit(mlxsw_core, skb, tx_info);
385 if (err) { 391 if (err) {
386 dev_err(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n", 392 dev_err(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n",
387 mlxsw_core->emad.tid); 393 mlxsw_core->emad.tid);
@@ -795,8 +801,7 @@ static int mlxsw_devlink_port_split(struct devlink *devlink,
795 return -EINVAL; 801 return -EINVAL;
796 if (!mlxsw_core->driver->port_split) 802 if (!mlxsw_core->driver->port_split)
797 return -EOPNOTSUPP; 803 return -EOPNOTSUPP;
798 return mlxsw_core->driver->port_split(mlxsw_core->driver_priv, 804 return mlxsw_core->driver->port_split(mlxsw_core, port_index, count);
799 port_index, count);
800} 805}
801 806
802static int mlxsw_devlink_port_unsplit(struct devlink *devlink, 807static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
@@ -808,8 +813,7 @@ static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
808 return -EINVAL; 813 return -EINVAL;
809 if (!mlxsw_core->driver->port_unsplit) 814 if (!mlxsw_core->driver->port_unsplit)
810 return -EOPNOTSUPP; 815 return -EOPNOTSUPP;
811 return mlxsw_core->driver->port_unsplit(mlxsw_core->driver_priv, 816 return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index);
812 port_index);
813} 817}
814 818
815static const struct devlink_ops mlxsw_devlink_ops = { 819static const struct devlink_ops mlxsw_devlink_ops = {
@@ -880,8 +884,7 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
880 if (err) 884 if (err)
881 goto err_devlink_register; 885 goto err_devlink_register;
882 886
883 err = mlxsw_driver->init(mlxsw_core->driver_priv, mlxsw_core, 887 err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info);
884 mlxsw_bus_info);
885 if (err) 888 if (err)
886 goto err_driver_init; 889 goto err_driver_init;
887 890
@@ -892,7 +895,7 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
892 return 0; 895 return 0;
893 896
894err_debugfs_init: 897err_debugfs_init:
895 mlxsw_core->driver->fini(mlxsw_core->driver_priv); 898 mlxsw_core->driver->fini(mlxsw_core);
896err_driver_init: 899err_driver_init:
897 devlink_unregister(devlink); 900 devlink_unregister(devlink);
898err_devlink_register: 901err_devlink_register:
@@ -918,7 +921,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
918 struct devlink *devlink = priv_to_devlink(mlxsw_core); 921 struct devlink *devlink = priv_to_devlink(mlxsw_core);
919 922
920 mlxsw_core_debugfs_fini(mlxsw_core); 923 mlxsw_core_debugfs_fini(mlxsw_core);
921 mlxsw_core->driver->fini(mlxsw_core->driver_priv); 924 mlxsw_core->driver->fini(mlxsw_core);
922 devlink_unregister(devlink); 925 devlink_unregister(devlink);
923 mlxsw_emad_fini(mlxsw_core); 926 mlxsw_emad_fini(mlxsw_core);
924 mlxsw_core->bus->fini(mlxsw_core->bus_priv); 927 mlxsw_core->bus->fini(mlxsw_core->bus_priv);
@@ -929,26 +932,17 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
929} 932}
930EXPORT_SYMBOL(mlxsw_core_bus_device_unregister); 933EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
931 934
932static struct mlxsw_core *__mlxsw_core_get(void *driver_priv) 935bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
933{
934 return container_of(driver_priv, struct mlxsw_core, driver_priv);
935}
936
937bool mlxsw_core_skb_transmit_busy(void *driver_priv,
938 const struct mlxsw_tx_info *tx_info) 936 const struct mlxsw_tx_info *tx_info)
939{ 937{
940 struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);
941
942 return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv, 938 return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
943 tx_info); 939 tx_info);
944} 940}
945EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy); 941EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);
946 942
947int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb, 943int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
948 const struct mlxsw_tx_info *tx_info) 944 const struct mlxsw_tx_info *tx_info)
949{ 945{
950 struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);
951
952 return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb, 946 return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
953 tx_info); 947 tx_info);
954} 948}
@@ -1358,6 +1352,28 @@ void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
1358} 1352}
1359EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear); 1353EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);
1360 1354
1355int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core,
1356 struct mlxsw_core_port *mlxsw_core_port, u8 local_port,
1357 struct net_device *dev, bool split, u32 split_group)
1358{
1359 struct devlink *devlink = priv_to_devlink(mlxsw_core);
1360 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
1361
1362 if (split)
1363 devlink_port_split_set(devlink_port, split_group);
1364 devlink_port_type_eth_set(devlink_port, dev);
1365 return devlink_port_register(devlink, devlink_port, local_port);
1366}
1367EXPORT_SYMBOL(mlxsw_core_port_init);
1368
1369void mlxsw_core_port_fini(struct mlxsw_core_port *mlxsw_core_port)
1370{
1371 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
1372
1373 devlink_port_unregister(devlink_port);
1374}
1375EXPORT_SYMBOL(mlxsw_core_port_fini);
1376
1361int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod, 1377int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
1362 u32 in_mod, bool out_mbox_direct, 1378 u32 in_mod, bool out_mbox_direct,
1363 char *in_mbox, size_t in_mbox_size, 1379 char *in_mbox, size_t in_mbox_size,
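
mlxsw_core_port_init()/mlxsw_core_port_fini() move devlink port registration into the core, so a port driver only embeds struct mlxsw_core_port and calls the helpers. A sketch with an illustrative port struct (not taken from this diff):

struct demo_port {
	struct mlxsw_core_port core_port;
	struct net_device *dev;
};

static int demo_port_register(struct mlxsw_core *core, struct demo_port *port,
			      u8 local_port)
{
	/* Non-split port: split = false, split_group unused (0). */
	return mlxsw_core_port_init(core, &port->core_port, local_port,
				    port->dev, false, 0);
}

static void demo_port_unregister(struct demo_port *port)
{
	mlxsw_core_port_fini(&port->core_port);
}
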
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index c73d1c0792a6..f3cebef9c31c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -43,6 +43,7 @@
43#include <linux/gfp.h> 43#include <linux/gfp.h>
44#include <linux/types.h> 44#include <linux/types.h>
45#include <linux/skbuff.h> 45#include <linux/skbuff.h>
46#include <net/devlink.h>
46 47
47#include "trap.h" 48#include "trap.h"
48#include "reg.h" 49#include "reg.h"
@@ -61,6 +62,8 @@ struct mlxsw_driver;
61struct mlxsw_bus; 62struct mlxsw_bus;
62struct mlxsw_bus_info; 63struct mlxsw_bus_info;
63 64
65void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core);
66
64int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver); 67int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver);
65void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver); 68void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver);
66 69
@@ -74,10 +77,9 @@ struct mlxsw_tx_info {
74 bool is_emad; 77 bool is_emad;
75}; 78};
76 79
77bool mlxsw_core_skb_transmit_busy(void *driver_priv, 80bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
78 const struct mlxsw_tx_info *tx_info); 81 const struct mlxsw_tx_info *tx_info);
79 82int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
80int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
81 const struct mlxsw_tx_info *tx_info); 83 const struct mlxsw_tx_info *tx_info);
82 84
83struct mlxsw_rx_listener { 85struct mlxsw_rx_listener {
@@ -131,6 +133,15 @@ u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
131void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core, 133void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
132 u16 lag_id, u8 local_port); 134 u16 lag_id, u8 local_port);
133 135
136struct mlxsw_core_port {
137 struct devlink_port devlink_port;
138};
139
140int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core,
141 struct mlxsw_core_port *mlxsw_core_port, u8 local_port,
142 struct net_device *dev, bool split, u32 split_group);
143void mlxsw_core_port_fini(struct mlxsw_core_port *mlxsw_core_port);
144
134#define MLXSW_CONFIG_PROFILE_SWID_COUNT 8 145#define MLXSW_CONFIG_PROFILE_SWID_COUNT 8
135 146
136struct mlxsw_swid_config { 147struct mlxsw_swid_config {
@@ -183,11 +194,12 @@ struct mlxsw_driver {
183 const char *kind; 194 const char *kind;
184 struct module *owner; 195 struct module *owner;
185 size_t priv_size; 196 size_t priv_size;
186 int (*init)(void *driver_priv, struct mlxsw_core *mlxsw_core, 197 int (*init)(struct mlxsw_core *mlxsw_core,
187 const struct mlxsw_bus_info *mlxsw_bus_info); 198 const struct mlxsw_bus_info *mlxsw_bus_info);
188 void (*fini)(void *driver_priv); 199 void (*fini)(struct mlxsw_core *mlxsw_core);
189 int (*port_split)(void *driver_priv, u8 local_port, unsigned int count); 200 int (*port_split)(struct mlxsw_core *mlxsw_core, u8 local_port,
190 int (*port_unsplit)(void *driver_priv, u8 local_port); 201 unsigned int count);
202 int (*port_unsplit)(struct mlxsw_core *mlxsw_core, u8 local_port);
191 void (*txhdr_construct)(struct sk_buff *skb, 203 void (*txhdr_construct)(struct sk_buff *skb,
192 const struct mlxsw_tx_info *tx_info); 204 const struct mlxsw_tx_info *tx_info);
193 u8 txhdr_len; 205 u8 txhdr_len;
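
The driver callbacks now take the struct mlxsw_core pointer itself; the private area is fetched on demand with mlxsw_core_driver_priv() instead of being passed in. A sketch of an init callback under the new signature (the demo_priv type is illustrative):

struct demo_priv {
	struct mlxsw_core *core;
	const struct mlxsw_bus_info *bus_info;
};

static int demo_init(struct mlxsw_core *mlxsw_core,
		     const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct demo_priv *priv = mlxsw_core_driver_priv(mlxsw_core);

	priv->core = mlxsw_core;
	priv->bus_info = mlxsw_bus_info;
	return 0;
}
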
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index ffe4c0305733..57e4a6337ae3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -1805,6 +1805,184 @@ static inline void mlxsw_reg_spvmlr_pack(char *payload, u8 local_port,
1805 } 1805 }
1806} 1806}
1807 1807
1808/* QTCT - QoS Switch Traffic Class Table
1809 * -------------------------------------
1810 * Configures the mapping between the packet switch priority and the
1811 * traffic class on the transmit port.
1812 */
1813#define MLXSW_REG_QTCT_ID 0x400A
1814#define MLXSW_REG_QTCT_LEN 0x08
1815
1816static const struct mlxsw_reg_info mlxsw_reg_qtct = {
1817 .id = MLXSW_REG_QTCT_ID,
1818 .len = MLXSW_REG_QTCT_LEN,
1819};
1820
1821/* reg_qtct_local_port
1822 * Local port number.
1823 * Access: Index
1824 *
1825 * Note: CPU port is not supported.
1826 */
1827MLXSW_ITEM32(reg, qtct, local_port, 0x00, 16, 8);
1828
1829/* reg_qtct_sub_port
1830 * Virtual port within the physical port.
1831 * Should be set to 0 when virtual ports are not enabled on the port.
1832 * Access: Index
1833 */
1834MLXSW_ITEM32(reg, qtct, sub_port, 0x00, 8, 8);
1835
1836/* reg_qtct_switch_prio
1837 * Switch priority.
1838 * Access: Index
1839 */
1840MLXSW_ITEM32(reg, qtct, switch_prio, 0x00, 0, 4);
1841
1842/* reg_qtct_tclass
1843 * Traffic class.
1844 * Default values:
1845 * switch_prio 0 : tclass 1
1846 * switch_prio 1 : tclass 0
1847 * switch_prio i : tclass i, for i > 1
1848 * Access: RW
1849 */
1850MLXSW_ITEM32(reg, qtct, tclass, 0x04, 0, 4);
1851
1852static inline void mlxsw_reg_qtct_pack(char *payload, u8 local_port,
1853 u8 switch_prio, u8 tclass)
1854{
1855 MLXSW_REG_ZERO(qtct, payload);
1856 mlxsw_reg_qtct_local_port_set(payload, local_port);
1857 mlxsw_reg_qtct_switch_prio_set(payload, switch_prio);
1858 mlxsw_reg_qtct_tclass_set(payload, tclass);
1859}
1860
1861/* QEEC - QoS ETS Element Configuration Register
1862 * ---------------------------------------------
1863 * Configures the ETS elements.
1864 */
1865#define MLXSW_REG_QEEC_ID 0x400D
1866#define MLXSW_REG_QEEC_LEN 0x1C
1867
1868static const struct mlxsw_reg_info mlxsw_reg_qeec = {
1869 .id = MLXSW_REG_QEEC_ID,
1870 .len = MLXSW_REG_QEEC_LEN,
1871};
1872
1873/* reg_qeec_local_port
1874 * Local port number.
1875 * Access: Index
1876 *
1877 * Note: CPU port is supported.
1878 */
1879MLXSW_ITEM32(reg, qeec, local_port, 0x00, 16, 8);
1880
1881enum mlxsw_reg_qeec_hr {
1882 MLXSW_REG_QEEC_HIERARCY_PORT,
1883 MLXSW_REG_QEEC_HIERARCY_GROUP,
1884 MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
1885 MLXSW_REG_QEEC_HIERARCY_TC,
1886};
1887
1888/* reg_qeec_element_hierarchy
1889 * 0 - Port
1890 * 1 - Group
1891 * 2 - Subgroup
1892 * 3 - Traffic Class
1893 * Access: Index
1894 */
1895MLXSW_ITEM32(reg, qeec, element_hierarchy, 0x04, 16, 4);
1896
1897/* reg_qeec_element_index
1898 * The index of the element in the hierarchy.
1899 * Access: Index
1900 */
1901MLXSW_ITEM32(reg, qeec, element_index, 0x04, 0, 8);
1902
1903/* reg_qeec_next_element_index
1904 * The index of the next (lower) element in the hierarchy.
1905 * Access: RW
1906 *
1907 * Note: Reserved for element_hierarchy 0.
1908 */
1909MLXSW_ITEM32(reg, qeec, next_element_index, 0x08, 0, 8);
1910
1911enum {
1912 MLXSW_REG_QEEC_BYTES_MODE,
1913 MLXSW_REG_QEEC_PACKETS_MODE,
1914};
1915
1916/* reg_qeec_pb
1917 * Packets or bytes mode.
1918 * 0 - Bytes mode
1919 * 1 - Packets mode
1920 * Access: RW
1921 *
1922 * Note: Used for max shaper configuration. For Spectrum, packets mode
1923 * is supported only for traffic classes of CPU port.
1924 */
1925MLXSW_ITEM32(reg, qeec, pb, 0x0C, 28, 1);
1926
1927/* reg_qeec_mase
1928 * Max shaper configuration enable. Enables configuration of the max
1929 * shaper on this ETS element.
1930 * 0 - Disable
1931 * 1 - Enable
1932 * Access: RW
1933 */
1934MLXSW_ITEM32(reg, qeec, mase, 0x10, 31, 1);
1935
1936/* A large max rate will disable the max shaper. */
1937#define MLXSW_REG_QEEC_MAS_DIS 200000000 /* Kbps */
1938
1939/* reg_qeec_max_shaper_rate
1940 * Max shaper information rate.
1941 * For CPU port, can only be configured for port hierarchy.
1942 * When in bytes mode, value is specified in units of 1000bps.
1943 * Access: RW
1944 */
1945MLXSW_ITEM32(reg, qeec, max_shaper_rate, 0x10, 0, 28);
1946
1947/* reg_qeec_de
1948 * DWRR configuration enable. Enables configuration of the dwrr and
1949 * dwrr_weight.
1950 * 0 - Disable
1951 * 1 - Enable
1952 * Access: RW
1953 */
1954MLXSW_ITEM32(reg, qeec, de, 0x18, 31, 1);
1955
1956/* reg_qeec_dwrr
1957 * Transmission selection algorithm to use on the link going down from
1958 * the ETS element.
1959 * 0 - Strict priority
1960 * 1 - DWRR
1961 * Access: RW
1962 */
1963MLXSW_ITEM32(reg, qeec, dwrr, 0x18, 15, 1);
1964
1965/* reg_qeec_dwrr_weight
1966 * DWRR weight on the link going down from the ETS element. The
1967 * percentage of bandwidth guaranteed to an ETS element within
1968 * its hierarchy. The sum of all weights across all ETS elements
1969 * within one hierarchy should be equal to 100. Reserved when
1970 * transmission selection algorithm is strict priority.
1971 * Access: RW
1972 */
1973MLXSW_ITEM32(reg, qeec, dwrr_weight, 0x18, 0, 8);
1974
1975static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port,
1976 enum mlxsw_reg_qeec_hr hr, u8 index,
1977 u8 next_index)
1978{
1979 MLXSW_REG_ZERO(qeec, payload);
1980 mlxsw_reg_qeec_local_port_set(payload, local_port);
1981 mlxsw_reg_qeec_element_hierarchy_set(payload, hr);
1982 mlxsw_reg_qeec_element_index_set(payload, index);
1983 mlxsw_reg_qeec_next_element_index_set(payload, next_index);
1984}
1985
1808/* PMLP - Ports Module to Local Port Register 1986/* PMLP - Ports Module to Local Port Register
1809 * ------------------------------------------ 1987 * ------------------------------------------
1810 * Configures the assignment of modules to local ports. 1988 * Configures the assignment of modules to local ports.
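
The new QEEC helpers are meant to be combined with the MLXSW_ITEM32-generated setters before the register is written out. A hedged sketch of enabling DWRR with a weight on one subgroup element (mlxsw_reg_write() and MLXSW_REG() are assumed from core.h/reg.h):

static int demo_qeec_set_dwrr(struct mlxsw_core *core, u8 local_port,
			      u8 index, u8 next_index, u8 weight)
{
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, local_port,
			    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);		/* allow dwrr update */
	mlxsw_reg_qeec_dwrr_set(qeec_pl, true);		/* DWRR, not strict */
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, weight);
	return mlxsw_reg_write(core, MLXSW_REG(qeec), qeec_pl);
}
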
@@ -2141,6 +2319,145 @@ static inline void mlxsw_reg_paos_pack(char *payload, u8 local_port,
2141 mlxsw_reg_paos_e_set(payload, 1); 2319 mlxsw_reg_paos_e_set(payload, 1);
2142} 2320}
2143 2321
2322/* PFCC - Ports Flow Control Configuration Register
2323 * ------------------------------------------------
2324 * Configures and retrieves the per port flow control configuration.
2325 */
2326#define MLXSW_REG_PFCC_ID 0x5007
2327#define MLXSW_REG_PFCC_LEN 0x20
2328
2329static const struct mlxsw_reg_info mlxsw_reg_pfcc = {
2330 .id = MLXSW_REG_PFCC_ID,
2331 .len = MLXSW_REG_PFCC_LEN,
2332};
2333
2334/* reg_pfcc_local_port
2335 * Local port number.
2336 * Access: Index
2337 */
2338MLXSW_ITEM32(reg, pfcc, local_port, 0x00, 16, 8);
2339
2340/* reg_pfcc_pnat
2341 * Port number access type. Determines the way local_port is interpreted:
2342 * 0 - Local port number.
2343 * 1 - IB / label port number.
2344 * Access: Index
2345 */
2346MLXSW_ITEM32(reg, pfcc, pnat, 0x00, 14, 2);
2347
2348/* reg_pfcc_shl_cap
2349 * Send to higher layers capabilities:
2350 * 0 - No capability of sending Pause and PFC frames to higher layers.
2351 * 1 - Device has capability of sending Pause and PFC frames to higher
2352 * layers.
2353 * Access: RO
2354 */
2355MLXSW_ITEM32(reg, pfcc, shl_cap, 0x00, 1, 1);
2356
2357/* reg_pfcc_shl_opr
2358 * Send to higher layers operation:
2359 * 0 - Pause and PFC frames are handled by the port (default).
2360 * 1 - Pause and PFC frames are handled by the port and also sent to
2361 * higher layers. Only valid if shl_cap = 1.
2362 * Access: RW
2363 */
2364MLXSW_ITEM32(reg, pfcc, shl_opr, 0x00, 0, 1);
2365
2366/* reg_pfcc_ppan
2367 * Pause policy auto negotiation.
2368 * 0 - Disabled. Generate / ignore Pause frames based on pptx / pprtx.
2369 * 1 - Enabled. When auto-negotiation is performed, set the Pause policy
2370 * based on the auto-negotiation resolution.
2371 * Access: RW
2372 *
2373 * Note: The auto-negotiation advertisement is set according to pptx and
2374 * pprx. When PFC is set on Tx / Rx, ppan must be set to 0.
2375 */
2376MLXSW_ITEM32(reg, pfcc, ppan, 0x04, 28, 4);
2377
2378/* reg_pfcc_prio_mask_tx
2379 * Bit per priority indicating if Tx flow control policy should be
2380 * updated based on bit pfctx.
2381 * Access: WO
2382 */
2383MLXSW_ITEM32(reg, pfcc, prio_mask_tx, 0x04, 16, 8);
2384
2385/* reg_pfcc_prio_mask_rx
2386 * Bit per priority indicating if Rx flow control policy should be
2387 * updated based on bit pfcrx.
2388 * Access: WO
2389 */
2390MLXSW_ITEM32(reg, pfcc, prio_mask_rx, 0x04, 0, 8);
2391
2392/* reg_pfcc_pptx
2393 * Admin Pause policy on Tx.
2394 * 0 - Never generate Pause frames (default).
2395 * 1 - Generate Pause frames according to Rx buffer threshold.
2396 * Access: RW
2397 */
2398MLXSW_ITEM32(reg, pfcc, pptx, 0x08, 31, 1);
2399
2400/* reg_pfcc_aptx
2401 * Active (operational) Pause policy on Tx.
2402 * 0 - Never generate Pause frames.
2403 * 1 - Generate Pause frames according to Rx buffer threshold.
2404 * Access: RO
2405 */
2406MLXSW_ITEM32(reg, pfcc, aptx, 0x08, 30, 1);
2407
2408/* reg_pfcc_pfctx
2409 * Priority based flow control policy on Tx[7:0]. Per-priority bit mask:
2410 * 0 - Never generate priority Pause frames on the specified priority
2411 * (default).
2412 * 1 - Generate priority Pause frames according to Rx buffer threshold on
2413 * the specified priority.
2414 * Access: RW
2415 *
2416 * Note: pfctx and pptx must be mutually exclusive.
2417 */
2418MLXSW_ITEM32(reg, pfcc, pfctx, 0x08, 16, 8);
2419
2420/* reg_pfcc_pprx
2421 * Admin Pause policy on Rx.
2422 * 0 - Ignore received Pause frames (default).
2423 * 1 - Respect received Pause frames.
2424 * Access: RW
2425 */
2426MLXSW_ITEM32(reg, pfcc, pprx, 0x0C, 31, 1);
2427
2428/* reg_pfcc_aprx
2429 * Active (operational) Pause policy on Rx.
2430 * 0 - Ignore received Pause frames.
2431 * 1 - Respect received Pause frames.
2432 * Access: RO
2433 */
2434MLXSW_ITEM32(reg, pfcc, aprx, 0x0C, 30, 1);
2435
2436/* reg_pfcc_pfcrx
2437 * Priority based flow control policy on Rx[7:0]. Per-priority bit mask:
2438 * 0 - Ignore incoming priority Pause frames on the specified priority
2439 * (default).
2440 * 1 - Respect incoming priority Pause frames on the specified priority.
2441 * Access: RW
2442 */
2443MLXSW_ITEM32(reg, pfcc, pfcrx, 0x0C, 16, 8);
2444
2445#define MLXSW_REG_PFCC_ALL_PRIO 0xFF
2446
2447static inline void mlxsw_reg_pfcc_prio_pack(char *payload, u8 pfc_en)
2448{
2449 mlxsw_reg_pfcc_prio_mask_tx_set(payload, MLXSW_REG_PFCC_ALL_PRIO);
2450 mlxsw_reg_pfcc_prio_mask_rx_set(payload, MLXSW_REG_PFCC_ALL_PRIO);
2451 mlxsw_reg_pfcc_pfctx_set(payload, pfc_en);
2452 mlxsw_reg_pfcc_pfcrx_set(payload, pfc_en);
2453}
2454
2455static inline void mlxsw_reg_pfcc_pack(char *payload, u8 local_port)
2456{
2457 MLXSW_REG_ZERO(pfcc, payload);
2458 mlxsw_reg_pfcc_local_port_set(payload, local_port);
2459}
2460
2144/* PPCNT - Ports Performance Counters Register 2461/* PPCNT - Ports Performance Counters Register
2145 * ------------------------------------------- 2462 * -------------------------------------------
2146 * The PPCNT register retrieves per port performance counters. 2463 * The PPCNT register retrieves per port performance counters.
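
PFCC splits admin and operational pause state per direction and adds a per-priority PFC bitmap; mlxsw_reg_pfcc_prio_pack() sets both masks and both bitmaps at once. A hedged usage sketch (mlxsw_reg_write()/MLXSW_REG() assumed as above):

static int demo_port_set_pfc(struct mlxsw_core *core, u8 local_port, u8 pfc_en)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, local_port);
	/* pfc_en is a bitmap of priorities that should generate/honour PFC */
	mlxsw_reg_pfcc_prio_pack(pfcc_pl, pfc_en);
	return mlxsw_reg_write(core, MLXSW_REG(pfcc), pfcc_pl);
}
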
@@ -2180,6 +2497,11 @@ MLXSW_ITEM32(reg, ppcnt, local_port, 0x00, 16, 8);
2180 */ 2497 */
2181MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2); 2498MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2);
2182 2499
2500enum mlxsw_reg_ppcnt_grp {
2501 MLXSW_REG_PPCNT_IEEE_8023_CNT = 0x0,
2502 MLXSW_REG_PPCNT_PRIO_CNT = 0x10,
2503};
2504
2183/* reg_ppcnt_grp 2505/* reg_ppcnt_grp
2184 * Performance counter group. 2506 * Performance counter group.
2185 * Group 63 indicates all groups. Only valid on Set() operation with 2507 * Group 63 indicates all groups. Only valid on Set() operation with
@@ -2215,6 +2537,8 @@ MLXSW_ITEM32(reg, ppcnt, clr, 0x04, 31, 1);
2215 */ 2537 */
2216MLXSW_ITEM32(reg, ppcnt, prio_tc, 0x04, 0, 5); 2538MLXSW_ITEM32(reg, ppcnt, prio_tc, 0x04, 0, 5);
2217 2539
2540/* Ethernet IEEE 802.3 Counter Group */
2541
2218/* reg_ppcnt_a_frames_transmitted_ok 2542/* reg_ppcnt_a_frames_transmitted_ok
2219 * Access: RO 2543 * Access: RO
2220 */ 2544 */
@@ -2329,15 +2653,145 @@ MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_received,
2329MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted, 2653MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted,
2330 0x08 + 0x90, 0, 64); 2654 0x08 + 0x90, 0, 64);
2331 2655
2332static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port) 2656/* Ethernet Per Priority Group Counters */
2657
2658/* reg_ppcnt_rx_octets
2659 * Access: RO
2660 */
2661MLXSW_ITEM64(reg, ppcnt, rx_octets, 0x08 + 0x00, 0, 64);
2662
2663/* reg_ppcnt_rx_frames
2664 * Access: RO
2665 */
2666MLXSW_ITEM64(reg, ppcnt, rx_frames, 0x08 + 0x20, 0, 64);
2667
2668/* reg_ppcnt_tx_octets
2669 * Access: RO
2670 */
2671MLXSW_ITEM64(reg, ppcnt, tx_octets, 0x08 + 0x28, 0, 64);
2672
2673/* reg_ppcnt_tx_frames
2674 * Access: RO
2675 */
2676MLXSW_ITEM64(reg, ppcnt, tx_frames, 0x08 + 0x48, 0, 64);
2677
2678/* reg_ppcnt_rx_pause
2679 * Access: RO
2680 */
2681MLXSW_ITEM64(reg, ppcnt, rx_pause, 0x08 + 0x50, 0, 64);
2682
2683/* reg_ppcnt_rx_pause_duration
2684 * Access: RO
2685 */
2686MLXSW_ITEM64(reg, ppcnt, rx_pause_duration, 0x08 + 0x58, 0, 64);
2687
2688/* reg_ppcnt_tx_pause
2689 * Access: RO
2690 */
2691MLXSW_ITEM64(reg, ppcnt, tx_pause, 0x08 + 0x60, 0, 64);
2692
2693/* reg_ppcnt_tx_pause_duration
2694 * Access: RO
2695 */
2696MLXSW_ITEM64(reg, ppcnt, tx_pause_duration, 0x08 + 0x68, 0, 64);
2697
2698/* reg_ppcnt_rx_pause_transition
2699 * Access: RO
2700 */
2701MLXSW_ITEM64(reg, ppcnt, tx_pause_transition, 0x08 + 0x70, 0, 64);
2702
2703static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port,
2704 enum mlxsw_reg_ppcnt_grp grp,
2705 u8 prio_tc)
2333{ 2706{
2334 MLXSW_REG_ZERO(ppcnt, payload); 2707 MLXSW_REG_ZERO(ppcnt, payload);
2335 mlxsw_reg_ppcnt_swid_set(payload, 0); 2708 mlxsw_reg_ppcnt_swid_set(payload, 0);
2336 mlxsw_reg_ppcnt_local_port_set(payload, local_port); 2709 mlxsw_reg_ppcnt_local_port_set(payload, local_port);
2337 mlxsw_reg_ppcnt_pnat_set(payload, 0); 2710 mlxsw_reg_ppcnt_pnat_set(payload, 0);
2338 mlxsw_reg_ppcnt_grp_set(payload, 0); 2711 mlxsw_reg_ppcnt_grp_set(payload, grp);
2339 mlxsw_reg_ppcnt_clr_set(payload, 0); 2712 mlxsw_reg_ppcnt_clr_set(payload, 0);
2340 mlxsw_reg_ppcnt_prio_tc_set(payload, 0); 2713 mlxsw_reg_ppcnt_prio_tc_set(payload, prio_tc);
2714}
2715
2716/* PPTB - Port Prio To Buffer Register
2717 * -----------------------------------
2718 * Configures the switch priority to buffer table.
2719 */
2720#define MLXSW_REG_PPTB_ID 0x500B
2721#define MLXSW_REG_PPTB_LEN 0x0C
2722
2723static const struct mlxsw_reg_info mlxsw_reg_pptb = {
2724 .id = MLXSW_REG_PPTB_ID,
2725 .len = MLXSW_REG_PPTB_LEN,
2726};
2727
2728enum {
2729 MLXSW_REG_PPTB_MM_UM,
2730 MLXSW_REG_PPTB_MM_UNICAST,
2731 MLXSW_REG_PPTB_MM_MULTICAST,
2732};
2733
2734/* reg_pptb_mm
2735 * Mapping mode.
2736 * 0 - Map both unicast and multicast packets to the same buffer.
2737 * 1 - Map only unicast packets.
2738 * 2 - Map only multicast packets.
2739 * Access: Index
2740 *
2741 * Note: SwitchX-2 only supports the first option.
2742 */
2743MLXSW_ITEM32(reg, pptb, mm, 0x00, 28, 2);
2744
2745/* reg_pptb_local_port
2746 * Local port number.
2747 * Access: Index
2748 */
2749MLXSW_ITEM32(reg, pptb, local_port, 0x00, 16, 8);
2750
2751/* reg_pptb_um
2752 * Enables the update of the untagged_buf field.
2753 * Access: RW
2754 */
2755MLXSW_ITEM32(reg, pptb, um, 0x00, 8, 1);
2756
2757/* reg_pptb_pm
2758 * Enables the update of the prio_to_buff field.
2759 * Bit <i> is a flag for updating the mapping for switch priority <i>.
2760 * Access: RW
2761 */
2762MLXSW_ITEM32(reg, pptb, pm, 0x00, 0, 8);
2763
2764/* reg_pptb_prio_to_buff
2765 * Mapping of switch priority <i> to one of the allocated receive port
2766 * buffers.
2767 * Access: RW
2768 */
2769MLXSW_ITEM_BIT_ARRAY(reg, pptb, prio_to_buff, 0x04, 0x04, 4);
2770
2771/* reg_pptb_pm_msb
2772 * Enables the update of the prio_to_buff field.
2773 * Bit <i> is a flag for updating the mapping for switch priority <i+8>.
2774 * Access: RW
2775 */
2776MLXSW_ITEM32(reg, pptb, pm_msb, 0x08, 24, 8);
2777
2778/* reg_pptb_untagged_buff
2779 * Mapping of untagged frames to one of the allocated receive port buffers.
2780 * Access: RW
2781 *
2782 * Note: In SwitchX-2 this field must be mapped to buffer 8. Reserved for
2783 * Spectrum, as it maps untagged packets based on the default switch priority.
2784 */
2785MLXSW_ITEM32(reg, pptb, untagged_buff, 0x08, 0, 4);
2786
2787#define MLXSW_REG_PPTB_ALL_PRIO 0xFF
2788
2789static inline void mlxsw_reg_pptb_pack(char *payload, u8 local_port)
2790{
2791 MLXSW_REG_ZERO(pptb, payload);
2792 mlxsw_reg_pptb_mm_set(payload, MLXSW_REG_PPTB_MM_UM);
2793 mlxsw_reg_pptb_local_port_set(payload, local_port);
2794 mlxsw_reg_pptb_pm_set(payload, MLXSW_REG_PPTB_ALL_PRIO);
2341} 2795}
2342 2796
2343/* PBMC - Port Buffer Management Control Register 2797/* PBMC - Port Buffer Management Control Register
@@ -2346,7 +2800,7 @@ static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port)
2346 * allocation for different Prios, and the Pause threshold management. 2800 * allocation for different Prios, and the Pause threshold management.
2347 */ 2801 */
2348#define MLXSW_REG_PBMC_ID 0x500C 2802#define MLXSW_REG_PBMC_ID 0x500C
2349#define MLXSW_REG_PBMC_LEN 0x68 2803#define MLXSW_REG_PBMC_LEN 0x6C
2350 2804
2351static const struct mlxsw_reg_info mlxsw_reg_pbmc = { 2805static const struct mlxsw_reg_info mlxsw_reg_pbmc = {
2352 .id = MLXSW_REG_PBMC_ID, 2806 .id = MLXSW_REG_PBMC_ID,
@@ -2374,6 +2828,8 @@ MLXSW_ITEM32(reg, pbmc, xoff_timer_value, 0x04, 16, 16);
2374 */ 2828 */
2375MLXSW_ITEM32(reg, pbmc, xoff_refresh, 0x04, 0, 16); 2829MLXSW_ITEM32(reg, pbmc, xoff_refresh, 0x04, 0, 16);
2376 2830
2831#define MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX 11
2832
2377/* reg_pbmc_buf_lossy 2833/* reg_pbmc_buf_lossy
2378 * The field indicates if the buffer is lossy. 2834 * The field indicates if the buffer is lossy.
2379 * 0 - Lossless 2835 * 0 - Lossless
@@ -2398,6 +2854,30 @@ MLXSW_ITEM32_INDEXED(reg, pbmc, buf_epsb, 0x0C, 24, 1, 0x08, 0x00, false);
2398 */ 2854 */
2399MLXSW_ITEM32_INDEXED(reg, pbmc, buf_size, 0x0C, 0, 16, 0x08, 0x00, false); 2855MLXSW_ITEM32_INDEXED(reg, pbmc, buf_size, 0x0C, 0, 16, 0x08, 0x00, false);
2400 2856
2857/* reg_pbmc_buf_xoff_threshold
2858 * Once the amount of data in the buffer goes above this value, device
2859 * starts sending PFC frames for all priorities associated with the
2860 * buffer. Units are represented in cells. Reserved in case of lossy
2861 * buffer.
2862 * Access: RW
2863 *
2864 * Note: In Spectrum, reserved for buffer[9].
2865 */
2866MLXSW_ITEM32_INDEXED(reg, pbmc, buf_xoff_threshold, 0x0C, 16, 16,
2867 0x08, 0x04, false);
2868
2869/* reg_pbmc_buf_xon_threshold
2870 * When the amount of data in the buffer goes below this value, device
2871 * stops sending PFC frames for the priorities associated with the
2872 * buffer. Units are represented in cells. Reserved in case of lossy
2873 * buffer.
2874 * Access: RW
2875 *
2876 * Note: In Spectrum, reserved for buffer[9].
2877 */
2878MLXSW_ITEM32_INDEXED(reg, pbmc, buf_xon_threshold, 0x0C, 0, 16,
2879 0x08, 0x04, false);
2880
2401static inline void mlxsw_reg_pbmc_pack(char *payload, u8 local_port, 2881static inline void mlxsw_reg_pbmc_pack(char *payload, u8 local_port,
2402 u16 xoff_timer_value, u16 xoff_refresh) 2882 u16 xoff_timer_value, u16 xoff_refresh)
2403{ 2883{
@@ -2416,6 +2896,17 @@ static inline void mlxsw_reg_pbmc_lossy_buffer_pack(char *payload,
2416 mlxsw_reg_pbmc_buf_size_set(payload, buf_index, size); 2896 mlxsw_reg_pbmc_buf_size_set(payload, buf_index, size);
2417} 2897}
2418 2898
2899static inline void mlxsw_reg_pbmc_lossless_buffer_pack(char *payload,
2900 int buf_index, u16 size,
2901 u16 threshold)
2902{
2903 mlxsw_reg_pbmc_buf_lossy_set(payload, buf_index, 0);
2904 mlxsw_reg_pbmc_buf_epsb_set(payload, buf_index, 0);
2905 mlxsw_reg_pbmc_buf_size_set(payload, buf_index, size);
2906 mlxsw_reg_pbmc_buf_xoff_threshold_set(payload, buf_index, threshold);
2907 mlxsw_reg_pbmc_buf_xon_threshold_set(payload, buf_index, threshold);
2908}
2909
2419/* PSPA - Port Switch Partition Allocation 2910/* PSPA - Port Switch Partition Allocation
2420 * --------------------------------------- 2911 * ---------------------------------------
2421 * Controls the association of a port with a switch partition and enables 2912 * Controls the association of a port with a switch partition and enables
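
mlxsw_reg_pbmc_lossless_buffer_pack() ties a headroom buffer's size to a single xon/xoff threshold. A hedged sketch of a read-modify-write on PBMC for buffer 0 (the query-first step reflects common usage, not something mandated by this diff):

static int demo_set_lossless_buf(struct mlxsw_core *core, u8 local_port,
				 u16 size_cells, u16 thres_cells)
{
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int err;

	mlxsw_reg_pbmc_pack(pbmc_pl, local_port, 0, 0);
	err = mlxsw_reg_query(core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	/* Buffer 0 becomes lossless; xon == xoff threshold, in cells. */
	mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, 0, size_cells,
					    thres_cells);
	return mlxsw_reg_write(core, MLXSW_REG(pbmc), pbmc_pl);
}
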
@@ -2985,9 +3476,10 @@ static const struct mlxsw_reg_info mlxsw_reg_sbpr = {
2985 .len = MLXSW_REG_SBPR_LEN, 3476 .len = MLXSW_REG_SBPR_LEN,
2986}; 3477};
2987 3478
2988enum mlxsw_reg_sbpr_dir { 3479/* shared direction enum for SBPR, SBCM, SBPM */
2989 MLXSW_REG_SBPR_DIR_INGRESS, 3480enum mlxsw_reg_sbxx_dir {
2990 MLXSW_REG_SBPR_DIR_EGRESS, 3481 MLXSW_REG_SBXX_DIR_INGRESS,
3482 MLXSW_REG_SBXX_DIR_EGRESS,
2991}; 3483};
2992 3484
2993/* reg_sbpr_dir 3485/* reg_sbpr_dir
@@ -3020,7 +3512,7 @@ enum mlxsw_reg_sbpr_mode {
3020MLXSW_ITEM32(reg, sbpr, mode, 0x08, 0, 4); 3512MLXSW_ITEM32(reg, sbpr, mode, 0x08, 0, 4);
3021 3513
3022static inline void mlxsw_reg_sbpr_pack(char *payload, u8 pool, 3514static inline void mlxsw_reg_sbpr_pack(char *payload, u8 pool,
3023 enum mlxsw_reg_sbpr_dir dir, 3515 enum mlxsw_reg_sbxx_dir dir,
3024 enum mlxsw_reg_sbpr_mode mode, u32 size) 3516 enum mlxsw_reg_sbpr_mode mode, u32 size)
3025{ 3517{
3026 MLXSW_REG_ZERO(sbpr, payload); 3518 MLXSW_REG_ZERO(sbpr, payload);
@@ -3062,11 +3554,6 @@ MLXSW_ITEM32(reg, sbcm, local_port, 0x00, 16, 8);
3062 */ 3554 */
3063MLXSW_ITEM32(reg, sbcm, pg_buff, 0x00, 8, 6); 3555MLXSW_ITEM32(reg, sbcm, pg_buff, 0x00, 8, 6);
3064 3556
3065enum mlxsw_reg_sbcm_dir {
3066 MLXSW_REG_SBCM_DIR_INGRESS,
3067 MLXSW_REG_SBCM_DIR_EGRESS,
3068};
3069
3070/* reg_sbcm_dir 3557/* reg_sbcm_dir
3071 * Direction. 3558 * Direction.
3072 * Access: Index 3559 * Access: Index
@@ -3099,7 +3586,7 @@ MLXSW_ITEM32(reg, sbcm, max_buff, 0x1C, 0, 24);
3099MLXSW_ITEM32(reg, sbcm, pool, 0x24, 0, 4); 3586MLXSW_ITEM32(reg, sbcm, pool, 0x24, 0, 4);
3100 3587
3101static inline void mlxsw_reg_sbcm_pack(char *payload, u8 local_port, u8 pg_buff, 3588static inline void mlxsw_reg_sbcm_pack(char *payload, u8 local_port, u8 pg_buff,
3102 enum mlxsw_reg_sbcm_dir dir, 3589 enum mlxsw_reg_sbxx_dir dir,
3103 u32 min_buff, u32 max_buff, u8 pool) 3590 u32 min_buff, u32 max_buff, u8 pool)
3104{ 3591{
3105 MLXSW_REG_ZERO(sbcm, payload); 3592 MLXSW_REG_ZERO(sbcm, payload);
@@ -3111,8 +3598,8 @@ static inline void mlxsw_reg_sbcm_pack(char *payload, u8 local_port, u8 pg_buff,
3111 mlxsw_reg_sbcm_pool_set(payload, pool); 3598 mlxsw_reg_sbcm_pool_set(payload, pool);
3112} 3599}
3113 3600
3114/* SBPM - Shared Buffer Class Management Register 3601/* SBPM - Shared Buffer Port Management Register
3115 * ---------------------------------------------- 3602 * ---------------------------------------------
3116 * The SBPM register configures and retrieves the shared buffer allocation 3603 * The SBPM register configures and retrieves the shared buffer allocation
3117 * and configuration according to Port-Pool, including the definition 3604 * and configuration according to Port-Pool, including the definition
3118 * of the associated quota. 3605 * of the associated quota.
@@ -3139,11 +3626,6 @@ MLXSW_ITEM32(reg, sbpm, local_port, 0x00, 16, 8);
3139 */ 3626 */
3140MLXSW_ITEM32(reg, sbpm, pool, 0x00, 8, 4); 3627MLXSW_ITEM32(reg, sbpm, pool, 0x00, 8, 4);
3141 3628
3142enum mlxsw_reg_sbpm_dir {
3143 MLXSW_REG_SBPM_DIR_INGRESS,
3144 MLXSW_REG_SBPM_DIR_EGRESS,
3145};
3146
3147/* reg_sbpm_dir 3629/* reg_sbpm_dir
3148 * Direction. 3630 * Direction.
3149 * Access: Index 3631 * Access: Index
@@ -3170,7 +3652,7 @@ MLXSW_ITEM32(reg, sbpm, min_buff, 0x18, 0, 24);
3170MLXSW_ITEM32(reg, sbpm, max_buff, 0x1C, 0, 24); 3652MLXSW_ITEM32(reg, sbpm, max_buff, 0x1C, 0, 24);
3171 3653
3172static inline void mlxsw_reg_sbpm_pack(char *payload, u8 local_port, u8 pool, 3654static inline void mlxsw_reg_sbpm_pack(char *payload, u8 local_port, u8 pool,
3173 enum mlxsw_reg_sbpm_dir dir, 3655 enum mlxsw_reg_sbxx_dir dir,
3174 u32 min_buff, u32 max_buff) 3656 u32 min_buff, u32 max_buff)
3175{ 3657{
3176 MLXSW_REG_ZERO(sbpm, payload); 3658 MLXSW_REG_ZERO(sbpm, payload);
@@ -3283,6 +3765,10 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
3283 return "SFMR"; 3765 return "SFMR";
3284 case MLXSW_REG_SPVMLR_ID: 3766 case MLXSW_REG_SPVMLR_ID:
3285 return "SPVMLR"; 3767 return "SPVMLR";
3768 case MLXSW_REG_QTCT_ID:
3769 return "QTCT";
3770 case MLXSW_REG_QEEC_ID:
3771 return "QEEC";
3286 case MLXSW_REG_PMLP_ID: 3772 case MLXSW_REG_PMLP_ID:
3287 return "PMLP"; 3773 return "PMLP";
3288 case MLXSW_REG_PMTU_ID: 3774 case MLXSW_REG_PMTU_ID:
@@ -3293,8 +3779,12 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
3293 return "PPAD"; 3779 return "PPAD";
3294 case MLXSW_REG_PAOS_ID: 3780 case MLXSW_REG_PAOS_ID:
3295 return "PAOS"; 3781 return "PAOS";
3782 case MLXSW_REG_PFCC_ID:
3783 return "PFCC";
3296 case MLXSW_REG_PPCNT_ID: 3784 case MLXSW_REG_PPCNT_ID:
3297 return "PPCNT"; 3785 return "PPCNT";
3786 case MLXSW_REG_PPTB_ID:
3787 return "PPTB";
3298 case MLXSW_REG_PBMC_ID: 3788 case MLXSW_REG_PBMC_ID:
3299 return "PBMC"; 3789 return "PBMC";
3300 case MLXSW_REG_PSPA_ID: 3790 case MLXSW_REG_PSPA_ID:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 4afbc3e9e381..19b3c144abc6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -49,7 +49,7 @@
49#include <linux/jiffies.h> 49#include <linux/jiffies.h>
50#include <linux/bitops.h> 50#include <linux/bitops.h>
51#include <linux/list.h> 51#include <linux/list.h>
52#include <net/devlink.h> 52#include <linux/dcbnl.h>
53#include <net/switchdev.h> 53#include <net/switchdev.h>
54#include <generated/utsrelease.h> 54#include <generated/utsrelease.h>
55 55
@@ -305,9 +305,9 @@ mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
305 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl); 305 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
306} 306}
307 307
308static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, 308static int __mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
309 u8 local_port, u8 *p_module, 309 u8 local_port, u8 *p_module,
310 u8 *p_width) 310 u8 *p_width, u8 *p_lane)
311{ 311{
312 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 312 char pmlp_pl[MLXSW_REG_PMLP_LEN];
313 int err; 313 int err;
@@ -318,9 +318,20 @@ static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
318 return err; 318 return err;
319 *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0); 319 *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
320 *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl); 320 *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
321 *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
321 return 0; 322 return 0;
322} 323}
323 324
325static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
326 u8 local_port, u8 *p_module,
327 u8 *p_width)
328{
329 u8 lane;
330
331 return __mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, p_module,
332 p_width, &lane);
333}
334
324static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port, 335static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
325 u8 module, u8 width, u8 lane) 336 u8 module, u8 width, u8 lane)
326{ 337{
@@ -379,7 +390,7 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
379 u64 len; 390 u64 len;
380 int err; 391 int err;
381 392
382 if (mlxsw_core_skb_transmit_busy(mlxsw_sp, &tx_info)) 393 if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
383 return NETDEV_TX_BUSY; 394 return NETDEV_TX_BUSY;
384 395
385 if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) { 396 if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
@@ -403,7 +414,7 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
403 /* Due to a race we might fail here because of a full queue. In that 414 /* Due to a race we might fail here because of a full queue. In that
404 * unlikely case we simply drop the packet. 415 * unlikely case we simply drop the packet.
405 */ 416 */
406 err = mlxsw_core_skb_transmit(mlxsw_sp, skb, &tx_info); 417 err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);
407 418
408 if (!err) { 419 if (!err) {
409 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 420 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
@@ -438,16 +449,89 @@ static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
438 return 0; 449 return 0;
439} 450}
440 451
452static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu,
453 bool pause_en, bool pfc_en, u16 delay)
454{
455 u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);
456
457 delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) :
458 MLXSW_SP_PAUSE_DELAY;
459
460 if (pause_en || pfc_en)
461 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index,
462 pg_size + delay, pg_size);
463 else
464 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size);
465}
466
467int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
468 u8 *prio_tc, bool pause_en,
469 struct ieee_pfc *my_pfc)
470{
471 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
472 u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
473 u16 delay = !!my_pfc ? my_pfc->delay : 0;
474 char pbmc_pl[MLXSW_REG_PBMC_LEN];
475 int i, j, err;
476
477 mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
478 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
479 if (err)
480 return err;
481
482 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
483 bool configure = false;
484 bool pfc = false;
485
486 for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
487 if (prio_tc[j] == i) {
488 pfc = pfc_en & BIT(j);
489 configure = true;
490 break;
491 }
492 }
493
494 if (!configure)
495 continue;
496 mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay);
497 }
498
499 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
500}
501
502static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
503 int mtu, bool pause_en)
504{
505 u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
506 bool dcb_en = !!mlxsw_sp_port->dcb.ets;
507 struct ieee_pfc *my_pfc;
508 u8 *prio_tc;
509
510 prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
511 my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;
512
513 return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
514 pause_en, my_pfc);
515}
516
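
To make the sizing above concrete: with the 96-byte cell size and 612-cell PAUSE allowance this patch adds to spectrum.h, an MTU of 1500 gives a 32-cell PG, and a lossless PG under global PAUSE ends up with 644 cells and an XOFF/XON threshold of 32. A standalone sketch of that arithmetic (illustrative values only):

/* Standalone illustration (not driver code) of the sizing done by
 * mlxsw_sp_pg_buf_pack(), using the constants introduced by this patch.
 */
#include <stdio.h>

#define BYTES_PER_CELL	96
#define PAUSE_DELAY	612	/* cells; 100m cable, maximum MTU */

static unsigned int bytes_to_cells(unsigned int bytes)
{
	return (bytes + BYTES_PER_CELL - 1) / BYTES_PER_CELL; /* round up */
}

int main(void)
{
	unsigned int mtu = 1500;
	unsigned int pg_size = 2 * bytes_to_cells(mtu);	/* 32 cells */

	/* Lossy PG: just twice the MTU worth of cells. */
	printf("lossy PG: %u cells\n", pg_size);
	/* Lossless PG under global PAUSE: the same size plus room for the
	 * data that can still arrive after XOFF is signalled; XOFF/XON
	 * trigger at pg_size.
	 */
	printf("lossless PG: %u cells, xoff/xon at %u cells\n",
	       pg_size + PAUSE_DELAY, pg_size);
	return 0;
}
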
441static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu) 517static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
442{ 518{
443 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 519 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
520 bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
444 int err; 521 int err;
445 522
446 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu); 523 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
447 if (err) 524 if (err)
448 return err; 525 return err;
526 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
527 if (err)
528 goto err_port_mtu_set;
449 dev->mtu = mtu; 529 dev->mtu = mtu;
450 return 0; 530 return 0;
531
532err_port_mtu_set:
533 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
534 return err;
451} 535}
452 536
453static struct rtnl_link_stats64 * 537static struct rtnl_link_stats64 *
@@ -861,6 +945,33 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev,
861 return 0; 945 return 0;
862} 946}
863 947
948static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
949 size_t len)
950{
951 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
952 u8 module, width, lane;
953 int err;
954
955 err = __mlxsw_sp_port_module_info_get(mlxsw_sp_port->mlxsw_sp,
956 mlxsw_sp_port->local_port,
957 &module, &width, &lane);
958 if (err) {
959 netdev_err(dev, "Failed to retrieve module information\n");
960 return err;
961 }
962
963 if (!mlxsw_sp_port->split)
964 err = snprintf(name, len, "p%d", module + 1);
965 else
966 err = snprintf(name, len, "p%ds%d", module + 1,
967 lane / width);
968
969 if (err >= len)
970 return -EINVAL;
971
972 return 0;
973}
974
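
The scheme above reports hardware modules one-based and derives the split sub-port from the first TX lane: module 3 becomes "p4", and when split with width 2, lane 2 becomes "p4s1". A standalone illustration with made-up PMLP values:

/* Standalone illustration of the phys_port_name format used above;
 * the module/width/lane values are made up (real ones come from PMLP).
 */
#include <stdio.h>

int main(void)
{
	unsigned int module = 3, width = 2, lane = 2;
	char name[16];

	/* Non-split port: "p<module + 1>" */
	snprintf(name, sizeof(name), "p%u", module + 1);
	printf("%s\n", name);				/* p4 */

	/* Split port: "p<module + 1>s<lane / width>" */
	snprintf(name, sizeof(name), "p%us%u", module + 1, lane / width);
	printf("%s\n", name);				/* p4s1 */
	return 0;
}
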
864static const struct net_device_ops mlxsw_sp_port_netdev_ops = { 975static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
865 .ndo_open = mlxsw_sp_port_open, 976 .ndo_open = mlxsw_sp_port_open,
866 .ndo_stop = mlxsw_sp_port_stop, 977 .ndo_stop = mlxsw_sp_port_stop,
@@ -877,6 +988,7 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
877 .ndo_bridge_setlink = switchdev_port_bridge_setlink, 988 .ndo_bridge_setlink = switchdev_port_bridge_setlink,
878 .ndo_bridge_getlink = switchdev_port_bridge_getlink, 989 .ndo_bridge_getlink = switchdev_port_bridge_getlink,
879 .ndo_bridge_dellink = switchdev_port_bridge_dellink, 990 .ndo_bridge_dellink = switchdev_port_bridge_dellink,
991 .ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name,
880}; 992};
881 993
882static void mlxsw_sp_port_get_drvinfo(struct net_device *dev, 994static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
@@ -897,6 +1009,68 @@ static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
897 sizeof(drvinfo->bus_info)); 1009 sizeof(drvinfo->bus_info));
898} 1010}
899 1011
1012static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
1013 struct ethtool_pauseparam *pause)
1014{
1015 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1016
1017 pause->rx_pause = mlxsw_sp_port->link.rx_pause;
1018 pause->tx_pause = mlxsw_sp_port->link.tx_pause;
1019}
1020
1021static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
1022 struct ethtool_pauseparam *pause)
1023{
1024 char pfcc_pl[MLXSW_REG_PFCC_LEN];
1025
1026 mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
1027 mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
1028 mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);
1029
1030 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
1031 pfcc_pl);
1032}
1033
1034static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
1035 struct ethtool_pauseparam *pause)
1036{
1037 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1038 bool pause_en = pause->tx_pause || pause->rx_pause;
1039 int err;
1040
1041 if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
1042 netdev_err(dev, "PFC already enabled on port\n");
1043 return -EINVAL;
1044 }
1045
1046 if (pause->autoneg) {
1047 netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
1048 return -EINVAL;
1049 }
1050
1051 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1052 if (err) {
1053 netdev_err(dev, "Failed to configure port's headroom\n");
1054 return err;
1055 }
1056
1057 err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
1058 if (err) {
1059 netdev_err(dev, "Failed to set PAUSE parameters\n");
1060 goto err_port_pause_configure;
1061 }
1062
1063 mlxsw_sp_port->link.rx_pause = pause->rx_pause;
1064 mlxsw_sp_port->link.tx_pause = pause->tx_pause;
1065
1066 return 0;
1067
1068err_port_pause_configure:
1069 pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
1070 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1071 return err;
1072}
1073
900struct mlxsw_sp_port_hw_stats { 1074struct mlxsw_sp_port_hw_stats {
901 char str[ETH_GSTRING_LEN]; 1075 char str[ETH_GSTRING_LEN];
902 u64 (*getter)(char *payload); 1076 u64 (*getter)(char *payload);
@@ -1032,7 +1206,8 @@ static void mlxsw_sp_port_get_stats(struct net_device *dev,
1032 int i; 1206 int i;
1033 int err; 1207 int err;
1034 1208
1035 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port); 1209 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port,
1210 MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
1036 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl); 1211 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
1037 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) 1212 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++)
1038 data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0; 1213 data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0;
@@ -1380,6 +1555,8 @@ static int mlxsw_sp_port_set_settings(struct net_device *dev,
1380static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { 1555static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
1381 .get_drvinfo = mlxsw_sp_port_get_drvinfo, 1556 .get_drvinfo = mlxsw_sp_port_get_drvinfo,
1382 .get_link = ethtool_op_get_link, 1557 .get_link = ethtool_op_get_link,
1558 .get_pauseparam = mlxsw_sp_port_get_pauseparam,
1559 .set_pauseparam = mlxsw_sp_port_set_pauseparam,
1383 .get_strings = mlxsw_sp_port_get_strings, 1560 .get_strings = mlxsw_sp_port_get_strings,
1384 .set_phys_id = mlxsw_sp_port_set_phys_id, 1561 .set_phys_id = mlxsw_sp_port_set_phys_id,
1385 .get_ethtool_stats = mlxsw_sp_port_get_stats, 1562 .get_ethtool_stats = mlxsw_sp_port_get_stats,
@@ -1402,12 +1579,112 @@ mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
1402 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 1579 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1403} 1580}
1404 1581
1582int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
1583 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
1584 bool dwrr, u8 dwrr_weight)
1585{
1586 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1587 char qeec_pl[MLXSW_REG_QEEC_LEN];
1588
1589 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1590 next_index);
1591 mlxsw_reg_qeec_de_set(qeec_pl, true);
1592 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
1593 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
1594 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1595}
1596
1597int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
1598 enum mlxsw_reg_qeec_hr hr, u8 index,
1599 u8 next_index, u32 maxrate)
1600{
1601 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1602 char qeec_pl[MLXSW_REG_QEEC_LEN];
1603
1604 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1605 next_index);
1606 mlxsw_reg_qeec_mase_set(qeec_pl, true);
1607 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
1608 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1609}
1610
1611int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
1612 u8 switch_prio, u8 tclass)
1613{
1614 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1615 char qtct_pl[MLXSW_REG_QTCT_LEN];
1616
1617 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
1618 tclass);
1619 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
1620}
1621
1622static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
1623{
1624 int err, i;
1625
 1626	/* Set up the elements hierarchy, so that each TC is linked to
 1627	 * one subgroup, and all subgroups are members of the same group.
1628 */
1629 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
1630 MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
1631 0);
1632 if (err)
1633 return err;
1634 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1635 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
1636 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
1637 0, false, 0);
1638 if (err)
1639 return err;
1640 }
1641 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1642 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
1643 MLXSW_REG_QEEC_HIERARCY_TC, i, i,
1644 false, 0);
1645 if (err)
1646 return err;
1647 }
1648
 1649	/* Make sure the max shaper is disabled in all hierarchies that
1650 * support it.
1651 */
1652 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
1653 MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
1654 MLXSW_REG_QEEC_MAS_DIS);
1655 if (err)
1656 return err;
1657 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1658 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
1659 MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
1660 i, 0,
1661 MLXSW_REG_QEEC_MAS_DIS);
1662 if (err)
1663 return err;
1664 }
1665 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1666 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
1667 MLXSW_REG_QEEC_HIERARCY_TC,
1668 i, i,
1669 MLXSW_REG_QEEC_MAS_DIS);
1670 if (err)
1671 return err;
1672 }
1673
1674 /* Map all priorities to traffic class 0. */
1675 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1676 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
1677 if (err)
1678 return err;
1679 }
1680
1681 return 0;
1682}
1683
1405static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, 1684static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1406 bool split, u8 module, u8 width) 1685 bool split, u8 module, u8 width)
1407{ 1686{
1408 struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
1409 struct mlxsw_sp_port *mlxsw_sp_port; 1687 struct mlxsw_sp_port *mlxsw_sp_port;
1410 struct devlink_port *devlink_port;
1411 struct net_device *dev; 1688 struct net_device *dev;
1412 size_t bytes; 1689 size_t bytes;
1413 int err; 1690 int err;
@@ -1460,16 +1737,6 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1460 */ 1737 */
1461 dev->hard_header_len += MLXSW_TXHDR_LEN; 1738 dev->hard_header_len += MLXSW_TXHDR_LEN;
1462 1739
1463 devlink_port = &mlxsw_sp_port->devlink_port;
1464 if (mlxsw_sp_port->split)
1465 devlink_port_split_set(devlink_port, module);
1466 err = devlink_port_register(devlink, devlink_port, local_port);
1467 if (err) {
1468 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register devlink port\n",
1469 mlxsw_sp_port->local_port);
1470 goto err_devlink_port_register;
1471 }
1472
1473 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port); 1740 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
1474 if (err) { 1741 if (err) {
1475 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n", 1742 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
@@ -1509,6 +1776,21 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1509 goto err_port_buffers_init; 1776 goto err_port_buffers_init;
1510 } 1777 }
1511 1778
1779 err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
1780 if (err) {
1781 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
1782 mlxsw_sp_port->local_port);
1783 goto err_port_ets_init;
1784 }
1785
1786 /* ETS and buffers must be initialized before DCB. */
1787 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
1788 if (err) {
1789 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
1790 mlxsw_sp_port->local_port);
1791 goto err_port_dcb_init;
1792 }
1793
1512 mlxsw_sp_port_switchdev_init(mlxsw_sp_port); 1794 mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
1513 err = register_netdev(dev); 1795 err = register_netdev(dev);
1514 if (err) { 1796 if (err) {
@@ -1517,7 +1799,14 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1517 goto err_register_netdev; 1799 goto err_register_netdev;
1518 } 1800 }
1519 1801
1520 devlink_port_type_eth_set(devlink_port, dev); 1802 err = mlxsw_core_port_init(mlxsw_sp->core, &mlxsw_sp_port->core_port,
1803 mlxsw_sp_port->local_port, dev,
1804 mlxsw_sp_port->split, module);
1805 if (err) {
1806 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
1807 mlxsw_sp_port->local_port);
1808 goto err_core_port_init;
1809 }
1521 1810
1522 err = mlxsw_sp_port_vlan_init(mlxsw_sp_port); 1811 err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
1523 if (err) 1812 if (err)
@@ -1527,16 +1816,18 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1527 return 0; 1816 return 0;
1528 1817
1529err_port_vlan_init: 1818err_port_vlan_init:
1819 mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
1820err_core_port_init:
1530 unregister_netdev(dev); 1821 unregister_netdev(dev);
1531err_register_netdev: 1822err_register_netdev:
1823err_port_dcb_init:
1824err_port_ets_init:
1532err_port_buffers_init: 1825err_port_buffers_init:
1533err_port_admin_status_set: 1826err_port_admin_status_set:
1534err_port_mtu_set: 1827err_port_mtu_set:
1535err_port_speed_by_width_set: 1828err_port_speed_by_width_set:
1536err_port_swid_set: 1829err_port_swid_set:
1537err_port_system_port_mapping_set: 1830err_port_system_port_mapping_set:
1538 devlink_port_unregister(&mlxsw_sp_port->devlink_port);
1539err_devlink_port_register:
1540err_dev_addr_init: 1831err_dev_addr_init:
1541 free_percpu(mlxsw_sp_port->pcpu_stats); 1832 free_percpu(mlxsw_sp_port->pcpu_stats);
1542err_alloc_stats: 1833err_alloc_stats:
@@ -1590,15 +1881,13 @@ static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
1590static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) 1881static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
1591{ 1882{
1592 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 1883 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
1593 struct devlink_port *devlink_port;
1594 1884
1595 if (!mlxsw_sp_port) 1885 if (!mlxsw_sp_port)
1596 return; 1886 return;
1597 mlxsw_sp->ports[local_port] = NULL; 1887 mlxsw_sp->ports[local_port] = NULL;
1598 devlink_port = &mlxsw_sp_port->devlink_port; 1888 mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
1599 devlink_port_type_clear(devlink_port);
1600 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ 1889 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
1601 devlink_port_unregister(devlink_port); 1890 mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
1602 mlxsw_sp_port_vports_fini(mlxsw_sp_port); 1891 mlxsw_sp_port_vports_fini(mlxsw_sp_port);
1603 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); 1892 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
1604 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 1893 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
@@ -1659,9 +1948,10 @@ static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
1659 return local_port - offset; 1948 return local_port - offset;
1660} 1949}
1661 1950
1662static int mlxsw_sp_port_split(void *priv, u8 local_port, unsigned int count) 1951static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
1952 unsigned int count)
1663{ 1953{
1664 struct mlxsw_sp *mlxsw_sp = priv; 1954 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
1665 struct mlxsw_sp_port *mlxsw_sp_port; 1955 struct mlxsw_sp_port *mlxsw_sp_port;
1666 u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count; 1956 u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
1667 u8 module, cur_width, base_port; 1957 u8 module, cur_width, base_port;
@@ -1733,9 +2023,9 @@ err_port_create:
1733 return err; 2023 return err;
1734} 2024}
1735 2025
1736static int mlxsw_sp_port_unsplit(void *priv, u8 local_port) 2026static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
1737{ 2027{
1738 struct mlxsw_sp *mlxsw_sp = priv; 2028 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
1739 struct mlxsw_sp_port *mlxsw_sp_port; 2029 struct mlxsw_sp_port *mlxsw_sp_port;
1740 u8 module, cur_width, base_port; 2030 u8 module, cur_width, base_port;
1741 unsigned int count; 2031 unsigned int count;
@@ -2080,10 +2370,10 @@ static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
2080 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl); 2370 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
2081} 2371}
2082 2372
2083static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core, 2373static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
2084 const struct mlxsw_bus_info *mlxsw_bus_info) 2374 const struct mlxsw_bus_info *mlxsw_bus_info)
2085{ 2375{
2086 struct mlxsw_sp *mlxsw_sp = priv; 2376 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2087 int err; 2377 int err;
2088 2378
2089 mlxsw_sp->core = mlxsw_core; 2379 mlxsw_sp->core = mlxsw_core;
@@ -2154,9 +2444,9 @@ err_event_register:
2154 return err; 2444 return err;
2155} 2445}
2156 2446
2157static void mlxsw_sp_fini(void *priv) 2447static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
2158{ 2448{
2159 struct mlxsw_sp *mlxsw_sp = priv; 2449 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2160 2450
2161 mlxsw_sp_switchdev_fini(mlxsw_sp); 2451 mlxsw_sp_switchdev_fini(mlxsw_sp);
2162 mlxsw_sp_traps_fini(mlxsw_sp); 2452 mlxsw_sp_traps_fini(mlxsw_sp);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 4b8abaf06321..361b0c270b56 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -42,15 +42,15 @@
42#include <linux/bitops.h> 42#include <linux/bitops.h>
43#include <linux/if_vlan.h> 43#include <linux/if_vlan.h>
44#include <linux/list.h> 44#include <linux/list.h>
45#include <linux/dcbnl.h>
45#include <net/switchdev.h> 46#include <net/switchdev.h>
46#include <net/devlink.h>
47 47
48#include "port.h" 48#include "port.h"
49#include "core.h" 49#include "core.h"
50 50
51#define MLXSW_SP_VFID_BASE VLAN_N_VID 51#define MLXSW_SP_VFID_BASE VLAN_N_VID
52#define MLXSW_SP_VFID_PORT_MAX 512 /* Non-bridged VLAN interfaces */ 52#define MLXSW_SP_VFID_PORT_MAX 512 /* Non-bridged VLAN interfaces */
53#define MLXSW_SP_VFID_BR_MAX 8192 /* Bridged VLAN interfaces */ 53#define MLXSW_SP_VFID_BR_MAX 6144 /* Bridged VLAN interfaces */
54#define MLXSW_SP_VFID_MAX (MLXSW_SP_VFID_PORT_MAX + MLXSW_SP_VFID_BR_MAX) 54#define MLXSW_SP_VFID_MAX (MLXSW_SP_VFID_PORT_MAX + MLXSW_SP_VFID_BR_MAX)
55 55
56#define MLXSW_SP_LAG_MAX 64 56#define MLXSW_SP_LAG_MAX 64
@@ -62,6 +62,23 @@
62 62
63#define MLXSW_SP_PORT_BASE_SPEED 25000 /* Mb/s */ 63#define MLXSW_SP_PORT_BASE_SPEED 25000 /* Mb/s */
64 64
65#define MLXSW_SP_BYTES_PER_CELL 96
66
67#define MLXSW_SP_BYTES_TO_CELLS(b) DIV_ROUND_UP(b, MLXSW_SP_BYTES_PER_CELL)
68
69/* Maximum delay buffer needed in case of PAUSE frames, in cells.
70 * Assumes 100m cable and maximum MTU.
71 */
72#define MLXSW_SP_PAUSE_DELAY 612
73
74#define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */
75
76static inline u16 mlxsw_sp_pfc_delay_get(int mtu, u16 delay)
77{
78 delay = MLXSW_SP_BYTES_TO_CELLS(DIV_ROUND_UP(delay, BITS_PER_BYTE));
79 return MLXSW_SP_CELL_FACTOR * delay + MLXSW_SP_BYTES_TO_CELLS(mtu);
80}
81
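
A worked pass through mlxsw_sp_pfc_delay_get() above: the delay allowance is divided by BITS_PER_BYTE, so it is evidently expressed in bits; 32768 bits (an arbitrary example) round up to 4096 bytes and 43 cells, the cell factor doubles that to 86, and one MTU worth of cells (16 for MTU 1500) is added, giving 102 cells of extra headroom. A standalone sketch of the same arithmetic:

/* Standalone sketch (not driver code) of the mlxsw_sp_pfc_delay_get()
 * conversion: bits -> bytes -> cells, scaled by the cell factor, plus
 * one MTU worth of cells.
 */
#include <stdio.h>

#define BYTES_PER_CELL	96
#define CELL_FACTOR	2	/* 2 * cell_size / (IPG + cell_size + 1) */

static unsigned int div_round_up(unsigned int n, unsigned int d)
{
	return (n + d - 1) / d;
}

int main(void)
{
	unsigned int delay_bits = 32768;	/* arbitrary example value */
	unsigned int mtu = 1500;
	unsigned int delay_cells;

	delay_cells = div_round_up(div_round_up(delay_bits, 8),
				   BYTES_PER_CELL);	/* 43 cells */
	printf("extra headroom: %u cells\n",
	       CELL_FACTOR * delay_cells + div_round_up(mtu, BYTES_PER_CELL));
	/* 2 * 43 + 16 = 102 cells */
	return 0;
}
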
65struct mlxsw_sp_port; 82struct mlxsw_sp_port;
66 83
67struct mlxsw_sp_upper { 84struct mlxsw_sp_upper {
@@ -148,6 +165,7 @@ struct mlxsw_sp_port_pcpu_stats {
148}; 165};
149 166
150struct mlxsw_sp_port { 167struct mlxsw_sp_port {
168 struct mlxsw_core_port core_port; /* must be first */
151 struct net_device *dev; 169 struct net_device *dev;
152 struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats; 170 struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats;
153 struct mlxsw_sp *mlxsw_sp; 171 struct mlxsw_sp *mlxsw_sp;
@@ -166,14 +184,28 @@ struct mlxsw_sp_port {
166 struct mlxsw_sp_vfid *vfid; 184 struct mlxsw_sp_vfid *vfid;
167 u16 vid; 185 u16 vid;
168 } vport; 186 } vport;
187 struct {
188 u8 tx_pause:1,
189 rx_pause:1;
190 } link;
191 struct {
192 struct ieee_ets *ets;
193 struct ieee_maxrate *maxrate;
194 struct ieee_pfc *pfc;
195 } dcb;
169 /* 802.1Q bridge VLANs */ 196 /* 802.1Q bridge VLANs */
170 unsigned long *active_vlans; 197 unsigned long *active_vlans;
171 unsigned long *untagged_vlans; 198 unsigned long *untagged_vlans;
172 /* VLAN interfaces */ 199 /* VLAN interfaces */
173 struct list_head vports_list; 200 struct list_head vports_list;
174 struct devlink_port devlink_port;
175}; 201};
176 202
203static inline bool
204mlxsw_sp_port_is_pause_en(const struct mlxsw_sp_port *mlxsw_sp_port)
205{
206 return mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause;
207}
208
177static inline struct mlxsw_sp_port * 209static inline struct mlxsw_sp_port *
178mlxsw_sp_port_lagged_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id, u8 port_index) 210mlxsw_sp_port_lagged_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id, u8 port_index)
179{ 211{
@@ -265,5 +297,33 @@ int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid,
265 bool set, bool only_uc); 297 bool set, bool only_uc);
266void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port); 298void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
267int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid); 299int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
300int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
301 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
302 bool dwrr, u8 dwrr_weight);
303int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
304 u8 switch_prio, u8 tclass);
305int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
306 u8 *prio_tc, bool pause_en,
307 struct ieee_pfc *my_pfc);
308int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
309 enum mlxsw_reg_qeec_hr hr, u8 index,
310 u8 next_index, u32 maxrate);
311
312#ifdef CONFIG_MLXSW_SPECTRUM_DCB
313
314int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port);
315void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port);
316
317#else
318
319static inline int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port)
320{
321 return 0;
322}
323
324static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
325{}
326
327#endif
268 328
269#endif 329#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index d59195e3f7fb..f58b1d3a619a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -34,6 +34,8 @@
34 34
35#include <linux/kernel.h> 35#include <linux/kernel.h>
36#include <linux/types.h> 36#include <linux/types.h>
37#include <linux/dcbnl.h>
38#include <linux/if_ether.h>
37 39
38#include "spectrum.h" 40#include "spectrum.h"
39#include "core.h" 41#include "core.h"
@@ -52,15 +54,15 @@ struct mlxsw_sp_pb {
52 } 54 }
53 55
54static const struct mlxsw_sp_pb mlxsw_sp_pbs[] = { 56static const struct mlxsw_sp_pb mlxsw_sp_pbs[] = {
55 MLXSW_SP_PB(0, 208), 57 MLXSW_SP_PB(0, 2 * MLXSW_SP_BYTES_TO_CELLS(ETH_FRAME_LEN)),
56 MLXSW_SP_PB(1, 208), 58 MLXSW_SP_PB(1, 0),
57 MLXSW_SP_PB(2, 208), 59 MLXSW_SP_PB(2, 0),
58 MLXSW_SP_PB(3, 208), 60 MLXSW_SP_PB(3, 0),
59 MLXSW_SP_PB(4, 208), 61 MLXSW_SP_PB(4, 0),
60 MLXSW_SP_PB(5, 208), 62 MLXSW_SP_PB(5, 0),
61 MLXSW_SP_PB(6, 208), 63 MLXSW_SP_PB(6, 0),
62 MLXSW_SP_PB(7, 208), 64 MLXSW_SP_PB(7, 0),
63 MLXSW_SP_PB(9, 208), 65 MLXSW_SP_PB(9, 2 * MLXSW_SP_BYTES_TO_CELLS(MLXSW_PORT_MAX_MTU)),
64}; 66};
65 67
66#define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs) 68#define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs)
@@ -78,25 +80,45 @@ static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
78 pb = &mlxsw_sp_pbs[i]; 80 pb = &mlxsw_sp_pbs[i];
79 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pb->index, pb->size); 81 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pb->index, pb->size);
80 } 82 }
83 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
84 MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
81 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, 85 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core,
82 MLXSW_REG(pbmc), pbmc_pl); 86 MLXSW_REG(pbmc), pbmc_pl);
83} 87}
84 88
85#define MLXSW_SP_SB_BYTES_PER_CELL 96 89static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
90{
91 char pptb_pl[MLXSW_REG_PPTB_LEN];
92 int i;
93
94 mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
95 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
96 mlxsw_reg_pptb_prio_to_buff_set(pptb_pl, i, 0);
97 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
98 pptb_pl);
99}
100
101static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
102{
103 int err;
104
105 err = mlxsw_sp_port_pb_init(mlxsw_sp_port);
106 if (err)
107 return err;
108 return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port);
109}
86 110
87struct mlxsw_sp_sb_pool { 111struct mlxsw_sp_sb_pool {
88 u8 pool; 112 u8 pool;
89 enum mlxsw_reg_sbpr_dir dir; 113 enum mlxsw_reg_sbxx_dir dir;
90 enum mlxsw_reg_sbpr_mode mode; 114 enum mlxsw_reg_sbpr_mode mode;
91 u32 size; 115 u32 size;
92}; 116};
93 117
94#define MLXSW_SP_SB_POOL_INGRESS_SIZE \ 118#define MLXSW_SP_SB_POOL_INGRESS_SIZE \
95 ((15000000 - (2 * 20000 * MLXSW_PORT_MAX_PORTS)) / \ 119 (15000000 - (2 * 20000 * MLXSW_PORT_MAX_PORTS))
96 MLXSW_SP_SB_BYTES_PER_CELL)
97#define MLXSW_SP_SB_POOL_EGRESS_SIZE \ 120#define MLXSW_SP_SB_POOL_EGRESS_SIZE \
98 ((14000000 - (8 * 1500 * MLXSW_PORT_MAX_PORTS)) / \ 121 (14000000 - (8 * 1500 * MLXSW_PORT_MAX_PORTS))
99 MLXSW_SP_SB_BYTES_PER_CELL)
100 122
101#define MLXSW_SP_SB_POOL(_pool, _dir, _mode, _size) \ 123#define MLXSW_SP_SB_POOL(_pool, _dir, _mode, _size) \
102 { \ 124 { \
@@ -107,22 +129,22 @@ struct mlxsw_sp_sb_pool {
107 } 129 }
108 130
109#define MLXSW_SP_SB_POOL_INGRESS(_pool, _size) \ 131#define MLXSW_SP_SB_POOL_INGRESS(_pool, _size) \
110 MLXSW_SP_SB_POOL(_pool, MLXSW_REG_SBPR_DIR_INGRESS, \ 132 MLXSW_SP_SB_POOL(_pool, MLXSW_REG_SBXX_DIR_INGRESS, \
111 MLXSW_REG_SBPR_MODE_DYNAMIC, _size) 133 MLXSW_REG_SBPR_MODE_DYNAMIC, _size)
112 134
113#define MLXSW_SP_SB_POOL_EGRESS(_pool, _size) \ 135#define MLXSW_SP_SB_POOL_EGRESS(_pool, _size) \
114 MLXSW_SP_SB_POOL(_pool, MLXSW_REG_SBPR_DIR_EGRESS, \ 136 MLXSW_SP_SB_POOL(_pool, MLXSW_REG_SBXX_DIR_EGRESS, \
115 MLXSW_REG_SBPR_MODE_DYNAMIC, _size) 137 MLXSW_REG_SBPR_MODE_DYNAMIC, _size)
116 138
117static const struct mlxsw_sp_sb_pool mlxsw_sp_sb_pools[] = { 139static const struct mlxsw_sp_sb_pool mlxsw_sp_sb_pools[] = {
118 MLXSW_SP_SB_POOL_INGRESS(0, MLXSW_SP_SB_POOL_INGRESS_SIZE), 140 MLXSW_SP_SB_POOL_INGRESS(0, MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_POOL_INGRESS_SIZE)),
119 MLXSW_SP_SB_POOL_INGRESS(1, 0), 141 MLXSW_SP_SB_POOL_INGRESS(1, 0),
120 MLXSW_SP_SB_POOL_INGRESS(2, 0), 142 MLXSW_SP_SB_POOL_INGRESS(2, 0),
121 MLXSW_SP_SB_POOL_INGRESS(3, 0), 143 MLXSW_SP_SB_POOL_INGRESS(3, 0),
122 MLXSW_SP_SB_POOL_EGRESS(0, MLXSW_SP_SB_POOL_EGRESS_SIZE), 144 MLXSW_SP_SB_POOL_EGRESS(0, MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_POOL_EGRESS_SIZE)),
123 MLXSW_SP_SB_POOL_EGRESS(1, 0), 145 MLXSW_SP_SB_POOL_EGRESS(1, 0),
124 MLXSW_SP_SB_POOL_EGRESS(2, 0), 146 MLXSW_SP_SB_POOL_EGRESS(2, 0),
125 MLXSW_SP_SB_POOL_EGRESS(2, MLXSW_SP_SB_POOL_EGRESS_SIZE), 147 MLXSW_SP_SB_POOL_EGRESS(2, MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_POOL_EGRESS_SIZE)),
126}; 148};
127 149
128#define MLXSW_SP_SB_POOLS_LEN ARRAY_SIZE(mlxsw_sp_sb_pools) 150#define MLXSW_SP_SB_POOLS_LEN ARRAY_SIZE(mlxsw_sp_sb_pools)
@@ -151,7 +173,7 @@ struct mlxsw_sp_sb_cm {
151 u8 pg; 173 u8 pg;
152 u8 tc; 174 u8 tc;
153 } u; 175 } u;
154 enum mlxsw_reg_sbcm_dir dir; 176 enum mlxsw_reg_sbxx_dir dir;
155 u32 min_buff; 177 u32 min_buff;
156 u32 max_buff; 178 u32 max_buff;
157 u8 pool; 179 u8 pool;
@@ -167,18 +189,18 @@ struct mlxsw_sp_sb_cm {
167 } 189 }
168 190
169#define MLXSW_SP_SB_CM_INGRESS(_pg, _min_buff, _max_buff) \ 191#define MLXSW_SP_SB_CM_INGRESS(_pg, _min_buff, _max_buff) \
170 MLXSW_SP_SB_CM(_pg, MLXSW_REG_SBCM_DIR_INGRESS, \ 192 MLXSW_SP_SB_CM(_pg, MLXSW_REG_SBXX_DIR_INGRESS, \
171 _min_buff, _max_buff, 0) 193 _min_buff, _max_buff, 0)
172 194
173#define MLXSW_SP_SB_CM_EGRESS(_tc, _min_buff, _max_buff) \ 195#define MLXSW_SP_SB_CM_EGRESS(_tc, _min_buff, _max_buff) \
174 MLXSW_SP_SB_CM(_tc, MLXSW_REG_SBCM_DIR_EGRESS, \ 196 MLXSW_SP_SB_CM(_tc, MLXSW_REG_SBXX_DIR_EGRESS, \
175 _min_buff, _max_buff, 0) 197 _min_buff, _max_buff, 0)
176 198
177#define MLXSW_SP_CPU_PORT_SB_CM_EGRESS(_tc) \ 199#define MLXSW_SP_CPU_PORT_SB_CM_EGRESS(_tc) \
178 MLXSW_SP_SB_CM(_tc, MLXSW_REG_SBCM_DIR_EGRESS, 104, 2, 3) 200 MLXSW_SP_SB_CM(_tc, MLXSW_REG_SBXX_DIR_EGRESS, 104, 2, 3)
179 201
180static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms[] = { 202static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms[] = {
181 MLXSW_SP_SB_CM_INGRESS(0, 10000 / MLXSW_SP_SB_BYTES_PER_CELL, 8), 203 MLXSW_SP_SB_CM_INGRESS(0, MLXSW_SP_BYTES_TO_CELLS(10000), 8),
182 MLXSW_SP_SB_CM_INGRESS(1, 0, 0), 204 MLXSW_SP_SB_CM_INGRESS(1, 0, 0),
183 MLXSW_SP_SB_CM_INGRESS(2, 0, 0), 205 MLXSW_SP_SB_CM_INGRESS(2, 0, 0),
184 MLXSW_SP_SB_CM_INGRESS(3, 0, 0), 206 MLXSW_SP_SB_CM_INGRESS(3, 0, 0),
@@ -186,15 +208,15 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms[] = {
186 MLXSW_SP_SB_CM_INGRESS(5, 0, 0), 208 MLXSW_SP_SB_CM_INGRESS(5, 0, 0),
187 MLXSW_SP_SB_CM_INGRESS(6, 0, 0), 209 MLXSW_SP_SB_CM_INGRESS(6, 0, 0),
188 MLXSW_SP_SB_CM_INGRESS(7, 0, 0), 210 MLXSW_SP_SB_CM_INGRESS(7, 0, 0),
189 MLXSW_SP_SB_CM_INGRESS(9, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff), 211 MLXSW_SP_SB_CM_INGRESS(9, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff),
190 MLXSW_SP_SB_CM_EGRESS(0, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), 212 MLXSW_SP_SB_CM_EGRESS(0, MLXSW_SP_BYTES_TO_CELLS(1500), 9),
191 MLXSW_SP_SB_CM_EGRESS(1, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), 213 MLXSW_SP_SB_CM_EGRESS(1, MLXSW_SP_BYTES_TO_CELLS(1500), 9),
192 MLXSW_SP_SB_CM_EGRESS(2, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), 214 MLXSW_SP_SB_CM_EGRESS(2, MLXSW_SP_BYTES_TO_CELLS(1500), 9),
193 MLXSW_SP_SB_CM_EGRESS(3, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), 215 MLXSW_SP_SB_CM_EGRESS(3, MLXSW_SP_BYTES_TO_CELLS(1500), 9),
194 MLXSW_SP_SB_CM_EGRESS(4, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), 216 MLXSW_SP_SB_CM_EGRESS(4, MLXSW_SP_BYTES_TO_CELLS(1500), 9),
195 MLXSW_SP_SB_CM_EGRESS(5, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), 217 MLXSW_SP_SB_CM_EGRESS(5, MLXSW_SP_BYTES_TO_CELLS(1500), 9),
196 MLXSW_SP_SB_CM_EGRESS(6, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), 218 MLXSW_SP_SB_CM_EGRESS(6, MLXSW_SP_BYTES_TO_CELLS(1500), 9),
197 MLXSW_SP_SB_CM_EGRESS(7, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), 219 MLXSW_SP_SB_CM_EGRESS(7, MLXSW_SP_BYTES_TO_CELLS(1500), 9),
198 MLXSW_SP_SB_CM_EGRESS(8, 0, 0), 220 MLXSW_SP_SB_CM_EGRESS(8, 0, 0),
199 MLXSW_SP_SB_CM_EGRESS(9, 0, 0), 221 MLXSW_SP_SB_CM_EGRESS(9, 0, 0),
200 MLXSW_SP_SB_CM_EGRESS(10, 0, 0), 222 MLXSW_SP_SB_CM_EGRESS(10, 0, 0),
@@ -282,7 +304,7 @@ static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
282 304
283struct mlxsw_sp_sb_pm { 305struct mlxsw_sp_sb_pm {
284 u8 pool; 306 u8 pool;
285 enum mlxsw_reg_sbpm_dir dir; 307 enum mlxsw_reg_sbxx_dir dir;
286 u32 min_buff; 308 u32 min_buff;
287 u32 max_buff; 309 u32 max_buff;
288}; 310};
@@ -296,11 +318,11 @@ struct mlxsw_sp_sb_pm {
296 } 318 }
297 319
298#define MLXSW_SP_SB_PM_INGRESS(_pool, _min_buff, _max_buff) \ 320#define MLXSW_SP_SB_PM_INGRESS(_pool, _min_buff, _max_buff) \
299 MLXSW_SP_SB_PM(_pool, MLXSW_REG_SBPM_DIR_INGRESS, \ 321 MLXSW_SP_SB_PM(_pool, MLXSW_REG_SBXX_DIR_INGRESS, \
300 _min_buff, _max_buff) 322 _min_buff, _max_buff)
301 323
302#define MLXSW_SP_SB_PM_EGRESS(_pool, _min_buff, _max_buff) \ 324#define MLXSW_SP_SB_PM_EGRESS(_pool, _min_buff, _max_buff) \
303 MLXSW_SP_SB_PM(_pool, MLXSW_REG_SBPM_DIR_EGRESS, \ 325 MLXSW_SP_SB_PM(_pool, MLXSW_REG_SBXX_DIR_EGRESS, \
304 _min_buff, _max_buff) 326 _min_buff, _max_buff)
305 327
306static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms[] = { 328static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms[] = {
@@ -353,21 +375,21 @@ struct mlxsw_sp_sb_mm {
353 } 375 }
354 376
355static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = { 377static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
356 MLXSW_SP_SB_MM(0, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), 378 MLXSW_SP_SB_MM(0, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
357 MLXSW_SP_SB_MM(1, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), 379 MLXSW_SP_SB_MM(1, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
358 MLXSW_SP_SB_MM(2, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), 380 MLXSW_SP_SB_MM(2, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
359 MLXSW_SP_SB_MM(3, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), 381 MLXSW_SP_SB_MM(3, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
360 MLXSW_SP_SB_MM(4, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), 382 MLXSW_SP_SB_MM(4, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
361 MLXSW_SP_SB_MM(5, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), 383 MLXSW_SP_SB_MM(5, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
362 MLXSW_SP_SB_MM(6, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), 384 MLXSW_SP_SB_MM(6, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
363 MLXSW_SP_SB_MM(7, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), 385 MLXSW_SP_SB_MM(7, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
364 MLXSW_SP_SB_MM(8, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), 386 MLXSW_SP_SB_MM(8, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
365 MLXSW_SP_SB_MM(9, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), 387 MLXSW_SP_SB_MM(9, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
366 MLXSW_SP_SB_MM(10, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), 388 MLXSW_SP_SB_MM(10, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
367 MLXSW_SP_SB_MM(11, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), 389 MLXSW_SP_SB_MM(11, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
368 MLXSW_SP_SB_MM(12, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), 390 MLXSW_SP_SB_MM(12, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
369 MLXSW_SP_SB_MM(13, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), 391 MLXSW_SP_SB_MM(13, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
370 MLXSW_SP_SB_MM(14, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), 392 MLXSW_SP_SB_MM(14, MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
371}; 393};
372 394
373#define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms) 395#define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms)
@@ -410,7 +432,7 @@ int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
410{ 432{
411 int err; 433 int err;
412 434
413 err = mlxsw_sp_port_pb_init(mlxsw_sp_port); 435 err = mlxsw_sp_port_headroom_init(mlxsw_sp_port);
414 if (err) 436 if (err)
415 return err; 437 return err;
416 err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port); 438 err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
new file mode 100644
index 000000000000..0b323661c0b6
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
@@ -0,0 +1,480 @@
1/*
2 * drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
3 * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the names of the copyright holders nor the names of its
15 * contributors may be used to endorse or promote products derived from
16 * this software without specific prior written permission.
17 *
18 * Alternatively, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") version 2 as published by the Free
20 * Software Foundation.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#include <linux/netdevice.h>
36#include <linux/string.h>
37#include <linux/bitops.h>
38#include <net/dcbnl.h>
39
40#include "spectrum.h"
41#include "reg.h"
42
43static u8 mlxsw_sp_dcbnl_getdcbx(struct net_device __always_unused *dev)
44{
45 return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
46}
47
48static u8 mlxsw_sp_dcbnl_setdcbx(struct net_device __always_unused *dev,
49 u8 mode)
50{
51 return (mode != (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE)) ? 1 : 0;
52}
53
54static int mlxsw_sp_dcbnl_ieee_getets(struct net_device *dev,
55 struct ieee_ets *ets)
56{
57 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
58
59 memcpy(ets, mlxsw_sp_port->dcb.ets, sizeof(*ets));
60
61 return 0;
62}
63
64static int mlxsw_sp_port_ets_validate(struct mlxsw_sp_port *mlxsw_sp_port,
65 struct ieee_ets *ets)
66{
67 struct net_device *dev = mlxsw_sp_port->dev;
68 bool has_ets_tc = false;
69 int i, tx_bw_sum = 0;
70
71 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
72 switch (ets->tc_tsa[i]) {
73 case IEEE_8021QAZ_TSA_STRICT:
74 break;
75 case IEEE_8021QAZ_TSA_ETS:
76 has_ets_tc = true;
77 tx_bw_sum += ets->tc_tx_bw[i];
78 break;
79 default:
80 netdev_err(dev, "Only strict priority and ETS are supported\n");
81 return -EINVAL;
82 }
83
84 if (ets->prio_tc[i] >= IEEE_8021QAZ_MAX_TCS) {
85 netdev_err(dev, "Invalid TC\n");
86 return -EINVAL;
87 }
88 }
89
90 if (has_ets_tc && tx_bw_sum != 100) {
91 netdev_err(dev, "Total ETS bandwidth should equal 100\n");
92 return -EINVAL;
93 }
94
95 return 0;
96}
97
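
For reference, settings that pass this validation have every TC either strict priority or ETS, ETS weights summing to 100, and each priority mapped to a valid TC. A hedged sketch of such an ieee_ets (userspace would normally feed this in over dcbnl, e.g. with lldptool):

/* Illustrative only: an ieee_ets that mlxsw_sp_port_ets_validate()
 * accepts. TCs 0-1 use ETS with a 60/40 split, the remaining TCs stay
 * strict priority, and priorities 0-3/4-7 map to TC 0/TC 1.
 */
#include <linux/dcbnl.h>
#include <string.h>

static void example_fill_ets(struct ieee_ets *ets)
{
	int prio;

	memset(ets, 0, sizeof(*ets));	/* TSA 0 == IEEE_8021QAZ_TSA_STRICT */
	ets->ets_cap = IEEE_8021QAZ_MAX_TCS;
	ets->tc_tsa[0] = IEEE_8021QAZ_TSA_ETS;
	ets->tc_tx_bw[0] = 60;
	ets->tc_tsa[1] = IEEE_8021QAZ_TSA_ETS;
	ets->tc_tx_bw[1] = 40;		/* ETS weights sum to 100 */
	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
		ets->prio_tc[prio] = prio < 4 ? 0 : 1;
}
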
98static int mlxsw_sp_port_pg_prio_map(struct mlxsw_sp_port *mlxsw_sp_port,
99 u8 *prio_tc)
100{
101 char pptb_pl[MLXSW_REG_PPTB_LEN];
102 int i;
103
104 mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
105 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
106 mlxsw_reg_pptb_prio_to_buff_set(pptb_pl, i, prio_tc[i]);
107 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
108 pptb_pl);
109}
110
111static bool mlxsw_sp_ets_has_pg(u8 *prio_tc, u8 pg)
112{
113 int i;
114
115 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
116 if (prio_tc[i] == pg)
117 return true;
118 return false;
119}
120
121static int mlxsw_sp_port_pg_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
122 u8 *old_prio_tc, u8 *new_prio_tc)
123{
124 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
125 char pbmc_pl[MLXSW_REG_PBMC_LEN];
126 int err, i;
127
128 mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
129 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
130 if (err)
131 return err;
132
133 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
134 u8 pg = old_prio_tc[i];
135
136 if (!mlxsw_sp_ets_has_pg(new_prio_tc, pg))
137 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg, 0);
138 }
139
140 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
141}
142
143static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
144 struct ieee_ets *ets)
145{
146 bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
147 struct ieee_ets *my_ets = mlxsw_sp_port->dcb.ets;
148 struct net_device *dev = mlxsw_sp_port->dev;
149 int err;
150
151 /* Create the required PGs, but don't destroy existing ones, as
152 * traffic is still directed to them.
153 */
154 err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
155 ets->prio_tc, pause_en,
156 mlxsw_sp_port->dcb.pfc);
157 if (err) {
158 netdev_err(dev, "Failed to configure port's headroom\n");
159 return err;
160 }
161
162 err = mlxsw_sp_port_pg_prio_map(mlxsw_sp_port, ets->prio_tc);
163 if (err) {
164 netdev_err(dev, "Failed to set PG-priority mapping\n");
165 goto err_port_prio_pg_map;
166 }
167
168 err = mlxsw_sp_port_pg_destroy(mlxsw_sp_port, my_ets->prio_tc,
169 ets->prio_tc);
170 if (err)
 171		netdev_warn(dev, "Failed to remove unused PGs\n");
172
173 return 0;
174
175err_port_prio_pg_map:
176 mlxsw_sp_port_pg_destroy(mlxsw_sp_port, ets->prio_tc, my_ets->prio_tc);
177 return err;
178}
179
180static int __mlxsw_sp_dcbnl_ieee_setets(struct mlxsw_sp_port *mlxsw_sp_port,
181 struct ieee_ets *ets)
182{
183 struct ieee_ets *my_ets = mlxsw_sp_port->dcb.ets;
184 struct net_device *dev = mlxsw_sp_port->dev;
185 int i, err;
186
187 /* Egress configuration. */
188 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
189 bool dwrr = ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS;
190 u8 weight = ets->tc_tx_bw[i];
191
192 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
193 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
194 0, dwrr, weight);
195 if (err) {
196 netdev_err(dev, "Failed to link subgroup ETS element %d to group\n",
197 i);
198 goto err_port_ets_set;
199 }
200 }
201
202 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
203 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
204 ets->prio_tc[i]);
205 if (err) {
206 netdev_err(dev, "Failed to map prio %d to TC %d\n", i,
207 ets->prio_tc[i]);
208 goto err_port_prio_tc_set;
209 }
210 }
211
212 /* Ingress configuration. */
213 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, ets);
214 if (err)
215 goto err_port_headroom_set;
216
217 return 0;
218
219err_port_headroom_set:
220 i = IEEE_8021QAZ_MAX_TCS;
221err_port_prio_tc_set:
222 for (i--; i >= 0; i--)
223 mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, my_ets->prio_tc[i]);
224 i = IEEE_8021QAZ_MAX_TCS;
225err_port_ets_set:
226 for (i--; i >= 0; i--) {
227 bool dwrr = my_ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS;
228 u8 weight = my_ets->tc_tx_bw[i];
229
230 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
231 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
232 0, dwrr, weight);
233 }
234 return err;
235}
236
237static int mlxsw_sp_dcbnl_ieee_setets(struct net_device *dev,
238 struct ieee_ets *ets)
239{
240 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
241 int err;
242
243 err = mlxsw_sp_port_ets_validate(mlxsw_sp_port, ets);
244 if (err)
245 return err;
246
247 err = __mlxsw_sp_dcbnl_ieee_setets(mlxsw_sp_port, ets);
248 if (err)
249 return err;
250
251 memcpy(mlxsw_sp_port->dcb.ets, ets, sizeof(*ets));
252
253 return 0;
254}
255
256static int mlxsw_sp_dcbnl_ieee_getmaxrate(struct net_device *dev,
257 struct ieee_maxrate *maxrate)
258{
259 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
260
261 memcpy(maxrate, mlxsw_sp_port->dcb.maxrate, sizeof(*maxrate));
262
263 return 0;
264}
265
266static int mlxsw_sp_dcbnl_ieee_setmaxrate(struct net_device *dev,
267 struct ieee_maxrate *maxrate)
268{
269 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
270 struct ieee_maxrate *my_maxrate = mlxsw_sp_port->dcb.maxrate;
271 int err, i;
272
273 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
274 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
275 MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
276 i, 0,
277 maxrate->tc_maxrate[i]);
278 if (err) {
279 netdev_err(dev, "Failed to set maxrate for TC %d\n", i);
280 goto err_port_ets_maxrate_set;
281 }
282 }
283
284 memcpy(mlxsw_sp_port->dcb.maxrate, maxrate, sizeof(*maxrate));
285
286 return 0;
287
288err_port_ets_maxrate_set:
289 for (i--; i >= 0; i--)
290 mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
291 MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
292 i, 0, my_maxrate->tc_maxrate[i]);
293 return err;
294}
295
296static int mlxsw_sp_port_pfc_cnt_get(struct mlxsw_sp_port *mlxsw_sp_port,
297 u8 prio)
298{
299 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
300 struct ieee_pfc *my_pfc = mlxsw_sp_port->dcb.pfc;
301 char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
302 int err;
303
304 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port,
305 MLXSW_REG_PPCNT_PRIO_CNT, prio);
306 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
307 if (err)
308 return err;
309
310 my_pfc->requests[prio] = mlxsw_reg_ppcnt_tx_pause_get(ppcnt_pl);
311 my_pfc->indications[prio] = mlxsw_reg_ppcnt_rx_pause_get(ppcnt_pl);
312
313 return 0;
314}
315
316static int mlxsw_sp_dcbnl_ieee_getpfc(struct net_device *dev,
317 struct ieee_pfc *pfc)
318{
319 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
320 int err, i;
321
322 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
323 err = mlxsw_sp_port_pfc_cnt_get(mlxsw_sp_port, i);
324 if (err) {
325 netdev_err(dev, "Failed to get PFC count for priority %d\n",
326 i);
327 return err;
328 }
329 }
330
331 memcpy(pfc, mlxsw_sp_port->dcb.pfc, sizeof(*pfc));
332
333 return 0;
334}
335
336static int mlxsw_sp_port_pfc_set(struct mlxsw_sp_port *mlxsw_sp_port,
337 struct ieee_pfc *pfc)
338{
339 char pfcc_pl[MLXSW_REG_PFCC_LEN];
340
341 mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
342 mlxsw_reg_pfcc_prio_pack(pfcc_pl, pfc->pfc_en);
343
344 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
345 pfcc_pl);
346}
347
348static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
349 struct ieee_pfc *pfc)
350{
351 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
352 int err;
353
354 if (mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause) {
355 netdev_err(dev, "PAUSE frames already enabled on port\n");
356 return -EINVAL;
357 }
358
359 err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
360 mlxsw_sp_port->dcb.ets->prio_tc,
361 false, pfc);
362 if (err) {
363 netdev_err(dev, "Failed to configure port's headroom for PFC\n");
364 return err;
365 }
366
367 err = mlxsw_sp_port_pfc_set(mlxsw_sp_port, pfc);
368 if (err) {
369 netdev_err(dev, "Failed to configure PFC\n");
370 goto err_port_pfc_set;
371 }
372
373 memcpy(mlxsw_sp_port->dcb.pfc, pfc, sizeof(*pfc));
374
375 return 0;
376
377err_port_pfc_set:
378 __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
379 mlxsw_sp_port->dcb.ets->prio_tc, false,
380 mlxsw_sp_port->dcb.pfc);
381 return err;
382}
383
384static const struct dcbnl_rtnl_ops mlxsw_sp_dcbnl_ops = {
385 .ieee_getets = mlxsw_sp_dcbnl_ieee_getets,
386 .ieee_setets = mlxsw_sp_dcbnl_ieee_setets,
387 .ieee_getmaxrate = mlxsw_sp_dcbnl_ieee_getmaxrate,
388 .ieee_setmaxrate = mlxsw_sp_dcbnl_ieee_setmaxrate,
389 .ieee_getpfc = mlxsw_sp_dcbnl_ieee_getpfc,
390 .ieee_setpfc = mlxsw_sp_dcbnl_ieee_setpfc,
391
392 .getdcbx = mlxsw_sp_dcbnl_getdcbx,
393 .setdcbx = mlxsw_sp_dcbnl_setdcbx,
394};
395
396static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
397{
398 mlxsw_sp_port->dcb.ets = kzalloc(sizeof(*mlxsw_sp_port->dcb.ets),
399 GFP_KERNEL);
400 if (!mlxsw_sp_port->dcb.ets)
401 return -ENOMEM;
402
403 mlxsw_sp_port->dcb.ets->ets_cap = IEEE_8021QAZ_MAX_TCS;
404
405 return 0;
406}
407
408static void mlxsw_sp_port_ets_fini(struct mlxsw_sp_port *mlxsw_sp_port)
409{
410 kfree(mlxsw_sp_port->dcb.ets);
411}
412
413static int mlxsw_sp_port_maxrate_init(struct mlxsw_sp_port *mlxsw_sp_port)
414{
415 int i;
416
417 mlxsw_sp_port->dcb.maxrate = kmalloc(sizeof(*mlxsw_sp_port->dcb.maxrate),
418 GFP_KERNEL);
419 if (!mlxsw_sp_port->dcb.maxrate)
420 return -ENOMEM;
421
422 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
423 mlxsw_sp_port->dcb.maxrate->tc_maxrate[i] = MLXSW_REG_QEEC_MAS_DIS;
424
425 return 0;
426}
427
428static void mlxsw_sp_port_maxrate_fini(struct mlxsw_sp_port *mlxsw_sp_port)
429{
430 kfree(mlxsw_sp_port->dcb.maxrate);
431}
432
433static int mlxsw_sp_port_pfc_init(struct mlxsw_sp_port *mlxsw_sp_port)
434{
435 mlxsw_sp_port->dcb.pfc = kzalloc(sizeof(*mlxsw_sp_port->dcb.pfc),
436 GFP_KERNEL);
437 if (!mlxsw_sp_port->dcb.pfc)
438 return -ENOMEM;
439
440 mlxsw_sp_port->dcb.pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
441
442 return 0;
443}
444
445static void mlxsw_sp_port_pfc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
446{
447 kfree(mlxsw_sp_port->dcb.pfc);
448}
449
450int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port)
451{
452 int err;
453
454 err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
455 if (err)
456 return err;
457 err = mlxsw_sp_port_maxrate_init(mlxsw_sp_port);
458 if (err)
459 goto err_port_maxrate_init;
460 err = mlxsw_sp_port_pfc_init(mlxsw_sp_port);
461 if (err)
462 goto err_port_pfc_init;
463
464 mlxsw_sp_port->dev->dcbnl_ops = &mlxsw_sp_dcbnl_ops;
465
466 return 0;
467
468err_port_pfc_init:
469 mlxsw_sp_port_maxrate_fini(mlxsw_sp_port);
470err_port_maxrate_init:
471 mlxsw_sp_port_ets_fini(mlxsw_sp_port);
472 return err;
473}
474
475void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
476{
477 mlxsw_sp_port_pfc_fini(mlxsw_sp_port);
478 mlxsw_sp_port_maxrate_fini(mlxsw_sp_port);
479 mlxsw_sp_port_ets_fini(mlxsw_sp_port);
480}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 7a60a26759b6..3842eab9449a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -43,7 +43,6 @@
43#include <linux/device.h> 43#include <linux/device.h>
44#include <linux/skbuff.h> 44#include <linux/skbuff.h>
45#include <linux/if_vlan.h> 45#include <linux/if_vlan.h>
46#include <net/devlink.h>
47#include <net/switchdev.h> 46#include <net/switchdev.h>
48#include <generated/utsrelease.h> 47#include <generated/utsrelease.h>
49 48
@@ -75,11 +74,11 @@ struct mlxsw_sx_port_pcpu_stats {
75}; 74};
76 75
77struct mlxsw_sx_port { 76struct mlxsw_sx_port {
77 struct mlxsw_core_port core_port; /* must be first */
78 struct net_device *dev; 78 struct net_device *dev;
79 struct mlxsw_sx_port_pcpu_stats __percpu *pcpu_stats; 79 struct mlxsw_sx_port_pcpu_stats __percpu *pcpu_stats;
80 struct mlxsw_sx *mlxsw_sx; 80 struct mlxsw_sx *mlxsw_sx;
81 u8 local_port; 81 u8 local_port;
82 struct devlink_port devlink_port;
83}; 82};
84 83
85/* tx_hdr_version 84/* tx_hdr_version
@@ -303,7 +302,7 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
303 u64 len; 302 u64 len;
304 int err; 303 int err;
305 304
306 if (mlxsw_core_skb_transmit_busy(mlxsw_sx, &tx_info)) 305 if (mlxsw_core_skb_transmit_busy(mlxsw_sx->core, &tx_info))
307 return NETDEV_TX_BUSY; 306 return NETDEV_TX_BUSY;
308 307
309 if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) { 308 if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
@@ -321,7 +320,7 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
321 /* Due to a race we might fail here because of a full queue. In that 320 /* Due to a race we might fail here because of a full queue. In that
322 * unlikely case we simply drop the packet. 321 * unlikely case we simply drop the packet.
323 */ 322 */
324 err = mlxsw_core_skb_transmit(mlxsw_sx, skb, &tx_info); 323 err = mlxsw_core_skb_transmit(mlxsw_sx->core, skb, &tx_info);
325 324
326 if (!err) { 325 if (!err) {
327 pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats); 326 pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
@@ -518,7 +517,8 @@ static void mlxsw_sx_port_get_stats(struct net_device *dev,
518 int i; 517 int i;
519 int err; 518 int err;
520 519
521 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sx_port->local_port); 520 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sx_port->local_port,
521 MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
522 err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppcnt), ppcnt_pl); 522 err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppcnt), ppcnt_pl);
523 for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++) 523 for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++)
524 data[i] = !err ? mlxsw_sx_port_hw_stats[i].getter(ppcnt_pl) : 0; 524 data[i] = !err ? mlxsw_sx_port_hw_stats[i].getter(ppcnt_pl) : 0;
@@ -955,9 +955,7 @@ mlxsw_sx_port_mac_learning_mode_set(struct mlxsw_sx_port *mlxsw_sx_port,
955 955
956static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port) 956static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
957{ 957{
958 struct devlink *devlink = priv_to_devlink(mlxsw_sx->core);
959 struct mlxsw_sx_port *mlxsw_sx_port; 958 struct mlxsw_sx_port *mlxsw_sx_port;
960 struct devlink_port *devlink_port;
961 struct net_device *dev; 959 struct net_device *dev;
962 bool usable; 960 bool usable;
963 int err; 961 int err;
@@ -1011,14 +1009,6 @@ static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
1011 goto port_not_usable; 1009 goto port_not_usable;
1012 } 1010 }
1013 1011
1014 devlink_port = &mlxsw_sx_port->devlink_port;
1015 err = devlink_port_register(devlink, devlink_port, local_port);
1016 if (err) {
1017 dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to register devlink port\n",
1018 mlxsw_sx_port->local_port);
1019 goto err_devlink_port_register;
1020 }
1021
1022 err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port); 1012 err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port);
1023 if (err) { 1013 if (err) {
1024 dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n", 1014 dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n",
@@ -1076,11 +1066,19 @@ static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
1076 goto err_register_netdev; 1066 goto err_register_netdev;
1077 } 1067 }
1078 1068
1079 devlink_port_type_eth_set(devlink_port, dev); 1069 err = mlxsw_core_port_init(mlxsw_sx->core, &mlxsw_sx_port->core_port,
1070 mlxsw_sx_port->local_port, dev, false, 0);
1071 if (err) {
1072 dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to init core port\n",
1073 mlxsw_sx_port->local_port);
1074 goto err_core_port_init;
1075 }
1080 1076
1081 mlxsw_sx->ports[local_port] = mlxsw_sx_port; 1077 mlxsw_sx->ports[local_port] = mlxsw_sx_port;
1082 return 0; 1078 return 0;
1083 1079
1080err_core_port_init:
1081 unregister_netdev(dev);
1084err_register_netdev: 1082err_register_netdev:
1085err_port_mac_learning_mode_set: 1083err_port_mac_learning_mode_set:
1086err_port_stp_state_set: 1084err_port_stp_state_set:
@@ -1089,8 +1087,6 @@ err_port_mtu_set:
1089err_port_speed_set: 1087err_port_speed_set:
1090err_port_swid_set: 1088err_port_swid_set:
1091err_port_system_port_mapping_set: 1089err_port_system_port_mapping_set:
1092 devlink_port_unregister(&mlxsw_sx_port->devlink_port);
1093err_devlink_port_register:
1094port_not_usable: 1090port_not_usable:
1095err_port_module_check: 1091err_port_module_check:
1096err_dev_addr_get: 1092err_dev_addr_get:
@@ -1103,15 +1099,12 @@ err_alloc_stats:
1103static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port) 1099static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
1104{ 1100{
1105 struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port]; 1101 struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
1106 struct devlink_port *devlink_port;
1107 1102
1108 if (!mlxsw_sx_port) 1103 if (!mlxsw_sx_port)
1109 return; 1104 return;
1110 devlink_port = &mlxsw_sx_port->devlink_port; 1105 mlxsw_core_port_fini(&mlxsw_sx_port->core_port);
1111 devlink_port_type_clear(devlink_port);
1112 unregister_netdev(mlxsw_sx_port->dev); /* This calls ndo_stop */ 1106 unregister_netdev(mlxsw_sx_port->dev); /* This calls ndo_stop */
1113 mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT); 1107 mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
1114 devlink_port_unregister(devlink_port);
1115 free_percpu(mlxsw_sx_port->pcpu_stats); 1108 free_percpu(mlxsw_sx_port->pcpu_stats);
1116 free_netdev(mlxsw_sx_port->dev); 1109 free_netdev(mlxsw_sx_port->dev);
1117} 1110}
@@ -1454,10 +1447,10 @@ static int mlxsw_sx_flood_init(struct mlxsw_sx *mlxsw_sx)
1454 return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sgcr), sgcr_pl); 1447 return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sgcr), sgcr_pl);
1455} 1448}
1456 1449
1457static int mlxsw_sx_init(void *priv, struct mlxsw_core *mlxsw_core, 1450static int mlxsw_sx_init(struct mlxsw_core *mlxsw_core,
1458 const struct mlxsw_bus_info *mlxsw_bus_info) 1451 const struct mlxsw_bus_info *mlxsw_bus_info)
1459{ 1452{
1460 struct mlxsw_sx *mlxsw_sx = priv; 1453 struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);
1461 int err; 1454 int err;
1462 1455
1463 mlxsw_sx->core = mlxsw_core; 1456 mlxsw_sx->core = mlxsw_core;
@@ -1504,9 +1497,9 @@ err_event_register:
1504 return err; 1497 return err;
1505} 1498}
1506 1499
1507static void mlxsw_sx_fini(void *priv) 1500static void mlxsw_sx_fini(struct mlxsw_core *mlxsw_core)
1508{ 1501{
1509 struct mlxsw_sx *mlxsw_sx = priv; 1502 struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);
1510 1503
1511 mlxsw_sx_traps_fini(mlxsw_sx); 1504 mlxsw_sx_traps_fini(mlxsw_sx);
1512 mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE); 1505 mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index ab264e1bccd0..3d53fcf323eb 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -45,7 +45,7 @@
45#include <linux/interrupt.h> 45#include <linux/interrupt.h>
46#include <linux/netdevice.h> 46#include <linux/netdevice.h>
47#include <linux/pci.h> 47#include <linux/pci.h>
48#include <asm-generic/io-64-nonatomic-hi-lo.h> 48#include <linux/io-64-nonatomic-hi-lo.h>
49 49
50#include "nfp_net_ctrl.h" 50#include "nfp_net_ctrl.h"
51 51
@@ -298,6 +298,8 @@ struct nfp_net_rx_buf {
298 * @rxds: Virtual address of FL/RX ring in host memory 298 * @rxds: Virtual address of FL/RX ring in host memory
299 * @dma: DMA address of the FL/RX ring 299 * @dma: DMA address of the FL/RX ring
300 * @size: Size, in bytes, of the FL/RX ring (needed to free) 300 * @size: Size, in bytes, of the FL/RX ring (needed to free)
301 * @bufsz: Buffer allocation size for convenience of management routines
302 * (NOTE: this is in second cache line, do not use on fast path!)
301 */ 303 */
302struct nfp_net_rx_ring { 304struct nfp_net_rx_ring {
303 struct nfp_net_r_vector *r_vec; 305 struct nfp_net_r_vector *r_vec;
@@ -319,6 +321,7 @@ struct nfp_net_rx_ring {
319 321
320 dma_addr_t dma; 322 dma_addr_t dma;
321 unsigned int size; 323 unsigned int size;
324 unsigned int bufsz;
322} ____cacheline_aligned; 325} ____cacheline_aligned;
323 326
324/** 327/**
@@ -472,6 +475,9 @@ struct nfp_net {
472 475
473 u32 rx_offset; 476 u32 rx_offset;
474 477
478 struct nfp_net_tx_ring *tx_rings;
479 struct nfp_net_rx_ring *rx_rings;
480
475#ifdef CONFIG_PCI_IOV 481#ifdef CONFIG_PCI_IOV
476 unsigned int num_vfs; 482 unsigned int num_vfs;
477 struct vf_data_storage *vfinfo; 483 struct vf_data_storage *vfinfo;
@@ -504,9 +510,6 @@ struct nfp_net {
504 int txd_cnt; 510 int txd_cnt;
505 int rxd_cnt; 511 int rxd_cnt;
506 512
507 struct nfp_net_tx_ring tx_rings[NFP_NET_MAX_TX_RINGS];
508 struct nfp_net_rx_ring rx_rings[NFP_NET_MAX_RX_RINGS];
509
510 u8 num_irqs; 513 u8 num_irqs;
511 u8 num_r_vecs; 514 u8 num_r_vecs;
512 struct nfp_net_r_vector r_vecs[NFP_NET_MAX_TX_RINGS]; 515 struct nfp_net_r_vector r_vecs[NFP_NET_MAX_TX_RINGS];
@@ -721,6 +724,7 @@ void nfp_net_rss_write_key(struct nfp_net *nn);
721void nfp_net_coalesce_write_cfg(struct nfp_net *nn); 724void nfp_net_coalesce_write_cfg(struct nfp_net *nn);
722int nfp_net_irqs_alloc(struct nfp_net *nn); 725int nfp_net_irqs_alloc(struct nfp_net *nn);
723void nfp_net_irqs_disable(struct nfp_net *nn); 726void nfp_net_irqs_disable(struct nfp_net *nn);
727int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt);
724 728
725#ifdef CONFIG_NFP_NET_DEBUG 729#ifdef CONFIG_NFP_NET_DEBUG
726void nfp_net_debugfs_create(void); 730void nfp_net_debugfs_create(void);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 43c618bafdb6..0bdff390c958 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -347,12 +347,18 @@ static irqreturn_t nfp_net_irq_exn(int irq, void *data)
347/** 347/**
348 * nfp_net_tx_ring_init() - Fill in the boilerplate for a TX ring 348 * nfp_net_tx_ring_init() - Fill in the boilerplate for a TX ring
349 * @tx_ring: TX ring structure 349 * @tx_ring: TX ring structure
350 * @r_vec: IRQ vector servicing this ring
351 * @idx: Ring index
350 */ 352 */
351static void nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring) 353static void
354nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring,
355 struct nfp_net_r_vector *r_vec, unsigned int idx)
352{ 356{
353 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
354 struct nfp_net *nn = r_vec->nfp_net; 357 struct nfp_net *nn = r_vec->nfp_net;
355 358
359 tx_ring->idx = idx;
360 tx_ring->r_vec = r_vec;
361
356 tx_ring->qcidx = tx_ring->idx * nn->stride_tx; 362 tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
357 tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx); 363 tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
358} 364}
@@ -360,12 +366,18 @@ static void nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring)
360/** 366/**
361 * nfp_net_rx_ring_init() - Fill in the boilerplate for a RX ring 367 * nfp_net_rx_ring_init() - Fill in the boilerplate for a RX ring
362 * @rx_ring: RX ring structure 368 * @rx_ring: RX ring structure
369 * @r_vec: IRQ vector servicing this ring
370 * @idx: Ring index
363 */ 371 */
364static void nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring) 372static void
373nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
374 struct nfp_net_r_vector *r_vec, unsigned int idx)
365{ 375{
366 struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
367 struct nfp_net *nn = r_vec->nfp_net; 376 struct nfp_net *nn = r_vec->nfp_net;
368 377
378 rx_ring->idx = idx;
379 rx_ring->r_vec = r_vec;
380
369 rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx; 381 rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
370 rx_ring->rx_qcidx = rx_ring->fl_qcidx + (nn->stride_rx - 1); 382 rx_ring->rx_qcidx = rx_ring->fl_qcidx + (nn->stride_rx - 1);
371 383
@@ -401,16 +413,6 @@ static void nfp_net_irqs_assign(struct net_device *netdev)
401 r_vec->irq_idx = NFP_NET_NON_Q_VECTORS + r; 413 r_vec->irq_idx = NFP_NET_NON_Q_VECTORS + r;
402 414
403 cpumask_set_cpu(r, &r_vec->affinity_mask); 415 cpumask_set_cpu(r, &r_vec->affinity_mask);
404
405 r_vec->tx_ring = &nn->tx_rings[r];
406 nn->tx_rings[r].idx = r;
407 nn->tx_rings[r].r_vec = r_vec;
408 nfp_net_tx_ring_init(r_vec->tx_ring);
409
410 r_vec->rx_ring = &nn->rx_rings[r];
411 nn->rx_rings[r].idx = r;
412 nn->rx_rings[r].r_vec = r_vec;
413 nfp_net_rx_ring_init(r_vec->rx_ring);
414 } 416 }
415} 417}
416 418
@@ -865,61 +867,59 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
865} 867}
866 868
867/** 869/**
868 * nfp_net_tx_flush() - Free any untransmitted buffers currently on the TX ring 870 * nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers
869 * @tx_ring: TX ring structure 871 * @nn: NFP Net device
872 * @tx_ring: TX ring structure
870 * 873 *
871 * Assumes that the device is stopped 874 * Assumes that the device is stopped
872 */ 875 */
873static void nfp_net_tx_flush(struct nfp_net_tx_ring *tx_ring) 876static void
877nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring)
874{ 878{
875 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
876 struct nfp_net *nn = r_vec->nfp_net;
877 struct pci_dev *pdev = nn->pdev;
878 const struct skb_frag_struct *frag; 879 const struct skb_frag_struct *frag;
879 struct netdev_queue *nd_q; 880 struct netdev_queue *nd_q;
880 struct sk_buff *skb; 881 struct pci_dev *pdev = nn->pdev;
881 int nr_frags;
882 int fidx;
883 int idx;
884 882
885 while (tx_ring->rd_p != tx_ring->wr_p) { 883 while (tx_ring->rd_p != tx_ring->wr_p) {
886 idx = tx_ring->rd_p % tx_ring->cnt; 884 int nr_frags, fidx, idx;
885 struct sk_buff *skb;
887 886
887 idx = tx_ring->rd_p % tx_ring->cnt;
888 skb = tx_ring->txbufs[idx].skb; 888 skb = tx_ring->txbufs[idx].skb;
889 if (skb) { 889 nr_frags = skb_shinfo(skb)->nr_frags;
890 nr_frags = skb_shinfo(skb)->nr_frags; 890 fidx = tx_ring->txbufs[idx].fidx;
891 fidx = tx_ring->txbufs[idx].fidx; 891
892 892 if (fidx == -1) {
893 if (fidx == -1) { 893 /* unmap head */
894 /* unmap head */ 894 dma_unmap_single(&pdev->dev,
895 dma_unmap_single(&pdev->dev, 895 tx_ring->txbufs[idx].dma_addr,
896 tx_ring->txbufs[idx].dma_addr, 896 skb_headlen(skb), DMA_TO_DEVICE);
897 skb_headlen(skb), 897 } else {
898 DMA_TO_DEVICE); 898 /* unmap fragment */
899 } else { 899 frag = &skb_shinfo(skb)->frags[fidx];
900 /* unmap fragment */ 900 dma_unmap_page(&pdev->dev,
901 frag = &skb_shinfo(skb)->frags[fidx]; 901 tx_ring->txbufs[idx].dma_addr,
902 dma_unmap_page(&pdev->dev, 902 skb_frag_size(frag), DMA_TO_DEVICE);
903 tx_ring->txbufs[idx].dma_addr,
904 skb_frag_size(frag),
905 DMA_TO_DEVICE);
906 }
907
908 /* check for last gather fragment */
909 if (fidx == nr_frags - 1)
910 dev_kfree_skb_any(skb);
911
912 tx_ring->txbufs[idx].dma_addr = 0;
913 tx_ring->txbufs[idx].skb = NULL;
914 tx_ring->txbufs[idx].fidx = -2;
915 } 903 }
916 904
917 memset(&tx_ring->txds[idx], 0, sizeof(tx_ring->txds[idx])); 905 /* check for last gather fragment */
906 if (fidx == nr_frags - 1)
907 dev_kfree_skb_any(skb);
908
909 tx_ring->txbufs[idx].dma_addr = 0;
910 tx_ring->txbufs[idx].skb = NULL;
911 tx_ring->txbufs[idx].fidx = -2;
918 912
919 tx_ring->qcp_rd_p++; 913 tx_ring->qcp_rd_p++;
920 tx_ring->rd_p++; 914 tx_ring->rd_p++;
921 } 915 }
922 916
917 memset(tx_ring->txds, 0, sizeof(*tx_ring->txds) * tx_ring->cnt);
918 tx_ring->wr_p = 0;
919 tx_ring->rd_p = 0;
920 tx_ring->qcp_rd_p = 0;
921 tx_ring->wr_ptr_add = 0;
922
923 nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx); 923 nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx);
924 netdev_tx_reset_queue(nd_q); 924 netdev_tx_reset_queue(nd_q);
925} 925}
@@ -957,25 +957,27 @@ static inline int nfp_net_rx_space(struct nfp_net_rx_ring *rx_ring)
957 * nfp_net_rx_alloc_one() - Allocate and map skb for RX 957 * nfp_net_rx_alloc_one() - Allocate and map skb for RX
958 * @rx_ring: RX ring structure of the skb 958 * @rx_ring: RX ring structure of the skb
959 * @dma_addr: Pointer to storage for DMA address (output param) 959 * @dma_addr: Pointer to storage for DMA address (output param)
960 * @fl_bufsz: size of freelist buffers
960 * 961 *
 961 * This function will allocate a new skb, map it for DMA. 962 * This function will allocate a new skb, map it for DMA.
962 * 963 *
963 * Return: allocated skb or NULL on failure. 964 * Return: allocated skb or NULL on failure.
964 */ 965 */
965static struct sk_buff * 966static struct sk_buff *
966nfp_net_rx_alloc_one(struct nfp_net_rx_ring *rx_ring, dma_addr_t *dma_addr) 967nfp_net_rx_alloc_one(struct nfp_net_rx_ring *rx_ring, dma_addr_t *dma_addr,
968 unsigned int fl_bufsz)
967{ 969{
968 struct nfp_net *nn = rx_ring->r_vec->nfp_net; 970 struct nfp_net *nn = rx_ring->r_vec->nfp_net;
969 struct sk_buff *skb; 971 struct sk_buff *skb;
970 972
971 skb = netdev_alloc_skb(nn->netdev, nn->fl_bufsz); 973 skb = netdev_alloc_skb(nn->netdev, fl_bufsz);
972 if (!skb) { 974 if (!skb) {
973 nn_warn_ratelimit(nn, "Failed to alloc receive SKB\n"); 975 nn_warn_ratelimit(nn, "Failed to alloc receive SKB\n");
974 return NULL; 976 return NULL;
975 } 977 }
976 978
977 *dma_addr = dma_map_single(&nn->pdev->dev, skb->data, 979 *dma_addr = dma_map_single(&nn->pdev->dev, skb->data,
978 nn->fl_bufsz, DMA_FROM_DEVICE); 980 fl_bufsz, DMA_FROM_DEVICE);
979 if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) { 981 if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) {
980 dev_kfree_skb_any(skb); 982 dev_kfree_skb_any(skb);
981 nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n"); 983 nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n");
@@ -1020,62 +1022,101 @@ static void nfp_net_rx_give_one(struct nfp_net_rx_ring *rx_ring,
1020} 1022}
1021 1023
1022/** 1024/**
1023 * nfp_net_rx_flush() - Free any buffers currently on the RX ring 1025 * nfp_net_rx_ring_reset() - Reflect in SW state of freelist after disable
1024 * @rx_ring: RX ring to remove buffers from 1026 * @rx_ring: RX ring structure
1025 * 1027 *
1026 * Assumes that the device is stopped 1028 * Warning: Do *not* call if ring buffers were never put on the FW freelist
1029 * (i.e. device was not enabled)!
1027 */ 1030 */
1028static void nfp_net_rx_flush(struct nfp_net_rx_ring *rx_ring) 1031static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
1029{ 1032{
1030 struct nfp_net *nn = rx_ring->r_vec->nfp_net; 1033 unsigned int wr_idx, last_idx;
1031 struct pci_dev *pdev = nn->pdev;
1032 int idx;
1033 1034
1034 while (rx_ring->rd_p != rx_ring->wr_p) { 1035 /* Move the empty entry to the end of the list */
1035 idx = rx_ring->rd_p % rx_ring->cnt; 1036 wr_idx = rx_ring->wr_p % rx_ring->cnt;
1037 last_idx = rx_ring->cnt - 1;
1038 rx_ring->rxbufs[wr_idx].dma_addr = rx_ring->rxbufs[last_idx].dma_addr;
1039 rx_ring->rxbufs[wr_idx].skb = rx_ring->rxbufs[last_idx].skb;
1040 rx_ring->rxbufs[last_idx].dma_addr = 0;
1041 rx_ring->rxbufs[last_idx].skb = NULL;
1036 1042
1037 if (rx_ring->rxbufs[idx].skb) { 1043 memset(rx_ring->rxds, 0, sizeof(*rx_ring->rxds) * rx_ring->cnt);
1038 dma_unmap_single(&pdev->dev, 1044 rx_ring->wr_p = 0;
1039 rx_ring->rxbufs[idx].dma_addr, 1045 rx_ring->rd_p = 0;
1040 nn->fl_bufsz, DMA_FROM_DEVICE); 1046 rx_ring->wr_ptr_add = 0;
1041 dev_kfree_skb_any(rx_ring->rxbufs[idx].skb); 1047}
1042 rx_ring->rxbufs[idx].dma_addr = 0;
1043 rx_ring->rxbufs[idx].skb = NULL;
1044 }
1045 1048
1046 memset(&rx_ring->rxds[idx], 0, sizeof(rx_ring->rxds[idx])); 1049/**
1050 * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring
1051 * @nn: NFP Net device
1052 * @rx_ring: RX ring to remove buffers from
1053 *
1054 * Assumes that the device is stopped and buffers are in [0, ring->cnt - 1)
1055 * entries. After device is disabled nfp_net_rx_ring_reset() must be called
1056 * to restore required ring geometry.
1057 */
1058static void
1059nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring)
1060{
1061 struct pci_dev *pdev = nn->pdev;
1062 unsigned int i;
1047 1063
1048 rx_ring->rd_p++; 1064 for (i = 0; i < rx_ring->cnt - 1; i++) {
1065 /* NULL skb can only happen when initial filling of the ring
1066 * fails to allocate enough buffers and calls here to free
1067 * already allocated ones.
1068 */
1069 if (!rx_ring->rxbufs[i].skb)
1070 continue;
1071
1072 dma_unmap_single(&pdev->dev, rx_ring->rxbufs[i].dma_addr,
1073 rx_ring->bufsz, DMA_FROM_DEVICE);
1074 dev_kfree_skb_any(rx_ring->rxbufs[i].skb);
1075 rx_ring->rxbufs[i].dma_addr = 0;
1076 rx_ring->rxbufs[i].skb = NULL;
1049 } 1077 }
1050} 1078}
1051 1079
1052/** 1080/**
1053 * nfp_net_rx_fill_freelist() - Attempt filling freelist with RX buffers 1081 * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW)
1054 * @rx_ring: RX ring to fill 1082 * @nn: NFP Net device
1055 * 1083 * @rx_ring: RX ring to remove buffers from
1056 * Try to fill as many buffers as possible into freelist. Return
1057 * number of buffers added.
1058 *
1059 * Return: Number of freelist buffers added.
1060 */ 1084 */
1061static int nfp_net_rx_fill_freelist(struct nfp_net_rx_ring *rx_ring) 1085static int
1086nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring)
1062{ 1087{
1063 struct sk_buff *skb; 1088 struct nfp_net_rx_buf *rxbufs;
1064 dma_addr_t dma_addr; 1089 unsigned int i;
1090
1091 rxbufs = rx_ring->rxbufs;
1065 1092
1066 while (nfp_net_rx_space(rx_ring)) { 1093 for (i = 0; i < rx_ring->cnt - 1; i++) {
1067 skb = nfp_net_rx_alloc_one(rx_ring, &dma_addr); 1094 rxbufs[i].skb =
1068 if (!skb) { 1095 nfp_net_rx_alloc_one(rx_ring, &rxbufs[i].dma_addr,
1069 nfp_net_rx_flush(rx_ring); 1096 rx_ring->bufsz);
1097 if (!rxbufs[i].skb) {
1098 nfp_net_rx_ring_bufs_free(nn, rx_ring);
1070 return -ENOMEM; 1099 return -ENOMEM;
1071 } 1100 }
1072 nfp_net_rx_give_one(rx_ring, skb, dma_addr);
1073 } 1101 }
1074 1102
1075 return 0; 1103 return 0;
1076} 1104}
1077 1105
1078/** 1106/**
1107 * nfp_net_rx_ring_fill_freelist() - Give buffers from the ring to FW
1108 * @rx_ring: RX ring to fill
1109 */
1110static void nfp_net_rx_ring_fill_freelist(struct nfp_net_rx_ring *rx_ring)
1111{
1112 unsigned int i;
1113
1114 for (i = 0; i < rx_ring->cnt - 1; i++)
1115 nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[i].skb,
1116 rx_ring->rxbufs[i].dma_addr);
1117}
1118
1119/**
1079 * nfp_net_rx_csum_has_errors() - group check if rxd has any csum errors 1120 * nfp_net_rx_csum_has_errors() - group check if rxd has any csum errors
1080 * @flags: RX descriptor flags field in CPU byte order 1121 * @flags: RX descriptor flags field in CPU byte order
1081 */ 1122 */
@@ -1240,7 +1281,8 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
1240 1281
1241 skb = rx_ring->rxbufs[idx].skb; 1282 skb = rx_ring->rxbufs[idx].skb;
1242 1283
1243 new_skb = nfp_net_rx_alloc_one(rx_ring, &new_dma_addr); 1284 new_skb = nfp_net_rx_alloc_one(rx_ring, &new_dma_addr,
1285 nn->fl_bufsz);
1244 if (!new_skb) { 1286 if (!new_skb) {
1245 nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[idx].skb, 1287 nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[idx].skb,
1246 rx_ring->rxbufs[idx].dma_addr); 1288 rx_ring->rxbufs[idx].dma_addr);
@@ -1349,10 +1391,6 @@ static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
1349 struct nfp_net *nn = r_vec->nfp_net; 1391 struct nfp_net *nn = r_vec->nfp_net;
1350 struct pci_dev *pdev = nn->pdev; 1392 struct pci_dev *pdev = nn->pdev;
1351 1393
1352 nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(tx_ring->idx), 0);
1353 nn_writeb(nn, NFP_NET_CFG_TXR_SZ(tx_ring->idx), 0);
1354 nn_writeb(nn, NFP_NET_CFG_TXR_VEC(tx_ring->idx), 0);
1355
1356 kfree(tx_ring->txbufs); 1394 kfree(tx_ring->txbufs);
1357 1395
1358 if (tx_ring->txds) 1396 if (tx_ring->txds)
@@ -1360,11 +1398,6 @@ static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
1360 tx_ring->txds, tx_ring->dma); 1398 tx_ring->txds, tx_ring->dma);
1361 1399
1362 tx_ring->cnt = 0; 1400 tx_ring->cnt = 0;
1363 tx_ring->wr_p = 0;
1364 tx_ring->rd_p = 0;
1365 tx_ring->qcp_rd_p = 0;
1366 tx_ring->wr_ptr_add = 0;
1367
1368 tx_ring->txbufs = NULL; 1401 tx_ring->txbufs = NULL;
1369 tx_ring->txds = NULL; 1402 tx_ring->txds = NULL;
1370 tx_ring->dma = 0; 1403 tx_ring->dma = 0;
@@ -1374,17 +1407,18 @@ static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
1374/** 1407/**
1375 * nfp_net_tx_ring_alloc() - Allocate resource for a TX ring 1408 * nfp_net_tx_ring_alloc() - Allocate resource for a TX ring
1376 * @tx_ring: TX Ring structure to allocate 1409 * @tx_ring: TX Ring structure to allocate
1410 * @cnt: Ring buffer count
1377 * 1411 *
1378 * Return: 0 on success, negative errno otherwise. 1412 * Return: 0 on success, negative errno otherwise.
1379 */ 1413 */
1380static int nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring) 1414static int nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring, u32 cnt)
1381{ 1415{
1382 struct nfp_net_r_vector *r_vec = tx_ring->r_vec; 1416 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
1383 struct nfp_net *nn = r_vec->nfp_net; 1417 struct nfp_net *nn = r_vec->nfp_net;
1384 struct pci_dev *pdev = nn->pdev; 1418 struct pci_dev *pdev = nn->pdev;
1385 int sz; 1419 int sz;
1386 1420
1387 tx_ring->cnt = nn->txd_cnt; 1421 tx_ring->cnt = cnt;
1388 1422
1389 tx_ring->size = sizeof(*tx_ring->txds) * tx_ring->cnt; 1423 tx_ring->size = sizeof(*tx_ring->txds) * tx_ring->cnt;
1390 tx_ring->txds = dma_zalloc_coherent(&pdev->dev, tx_ring->size, 1424 tx_ring->txds = dma_zalloc_coherent(&pdev->dev, tx_ring->size,
@@ -1397,11 +1431,6 @@ static int nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring)
1397 if (!tx_ring->txbufs) 1431 if (!tx_ring->txbufs)
1398 goto err_alloc; 1432 goto err_alloc;
1399 1433
1400 /* Write the DMA address, size and MSI-X info to the device */
1401 nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(tx_ring->idx), tx_ring->dma);
1402 nn_writeb(nn, NFP_NET_CFG_TXR_SZ(tx_ring->idx), ilog2(tx_ring->cnt));
1403 nn_writeb(nn, NFP_NET_CFG_TXR_VEC(tx_ring->idx), r_vec->irq_idx);
1404
1405 netif_set_xps_queue(nn->netdev, &r_vec->affinity_mask, tx_ring->idx); 1434 netif_set_xps_queue(nn->netdev, &r_vec->affinity_mask, tx_ring->idx);
1406 1435
1407 nn_dbg(nn, "TxQ%02d: QCidx=%02d cnt=%d dma=%#llx host=%p\n", 1436 nn_dbg(nn, "TxQ%02d: QCidx=%02d cnt=%d dma=%#llx host=%p\n",
@@ -1415,6 +1444,59 @@ err_alloc:
1415 return -ENOMEM; 1444 return -ENOMEM;
1416} 1445}
1417 1446
1447static struct nfp_net_tx_ring *
1448nfp_net_shadow_tx_rings_prepare(struct nfp_net *nn, u32 buf_cnt)
1449{
1450 struct nfp_net_tx_ring *rings;
1451 unsigned int r;
1452
1453 rings = kcalloc(nn->num_tx_rings, sizeof(*rings), GFP_KERNEL);
1454 if (!rings)
1455 return NULL;
1456
1457 for (r = 0; r < nn->num_tx_rings; r++) {
1458 nfp_net_tx_ring_init(&rings[r], nn->tx_rings[r].r_vec, r);
1459
1460 if (nfp_net_tx_ring_alloc(&rings[r], buf_cnt))
1461 goto err_free_prev;
1462 }
1463
1464 return rings;
1465
1466err_free_prev:
1467 while (r--)
1468 nfp_net_tx_ring_free(&rings[r]);
1469 kfree(rings);
1470 return NULL;
1471}
1472
1473static struct nfp_net_tx_ring *
1474nfp_net_shadow_tx_rings_swap(struct nfp_net *nn, struct nfp_net_tx_ring *rings)
1475{
1476 struct nfp_net_tx_ring *old = nn->tx_rings;
1477 unsigned int r;
1478
1479 for (r = 0; r < nn->num_tx_rings; r++)
1480 old[r].r_vec->tx_ring = &rings[r];
1481
1482 nn->tx_rings = rings;
1483 return old;
1484}
1485
1486static void
1487nfp_net_shadow_tx_rings_free(struct nfp_net *nn, struct nfp_net_tx_ring *rings)
1488{
1489 unsigned int r;
1490
1491 if (!rings)
1492 return;
1493
1494 for (r = 0; r < nn->num_tx_rings; r++)
1495 nfp_net_tx_ring_free(&rings[r]);
1496
1497 kfree(rings);
1498}
1499
1418/** 1500/**
1419 * nfp_net_rx_ring_free() - Free resources allocated to a RX ring 1501 * nfp_net_rx_ring_free() - Free resources allocated to a RX ring
1420 * @rx_ring: RX ring to free 1502 * @rx_ring: RX ring to free
@@ -1425,10 +1507,6 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
1425 struct nfp_net *nn = r_vec->nfp_net; 1507 struct nfp_net *nn = r_vec->nfp_net;
1426 struct pci_dev *pdev = nn->pdev; 1508 struct pci_dev *pdev = nn->pdev;
1427 1509
1428 nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(rx_ring->idx), 0);
1429 nn_writeb(nn, NFP_NET_CFG_RXR_SZ(rx_ring->idx), 0);
1430 nn_writeb(nn, NFP_NET_CFG_RXR_VEC(rx_ring->idx), 0);
1431
1432 kfree(rx_ring->rxbufs); 1510 kfree(rx_ring->rxbufs);
1433 1511
1434 if (rx_ring->rxds) 1512 if (rx_ring->rxds)
@@ -1436,10 +1514,6 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
1436 rx_ring->rxds, rx_ring->dma); 1514 rx_ring->rxds, rx_ring->dma);
1437 1515
1438 rx_ring->cnt = 0; 1516 rx_ring->cnt = 0;
1439 rx_ring->wr_p = 0;
1440 rx_ring->rd_p = 0;
1441 rx_ring->wr_ptr_add = 0;
1442
1443 rx_ring->rxbufs = NULL; 1517 rx_ring->rxbufs = NULL;
1444 rx_ring->rxds = NULL; 1518 rx_ring->rxds = NULL;
1445 rx_ring->dma = 0; 1519 rx_ring->dma = 0;
@@ -1449,17 +1523,22 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
1449/** 1523/**
1450 * nfp_net_rx_ring_alloc() - Allocate resource for a RX ring 1524 * nfp_net_rx_ring_alloc() - Allocate resource for a RX ring
1451 * @rx_ring: RX ring to allocate 1525 * @rx_ring: RX ring to allocate
1526 * @fl_bufsz: Size of buffers to allocate
1527 * @cnt: Ring buffer count
1452 * 1528 *
1453 * Return: 0 on success, negative errno otherwise. 1529 * Return: 0 on success, negative errno otherwise.
1454 */ 1530 */
1455static int nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring) 1531static int
1532nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring, unsigned int fl_bufsz,
1533 u32 cnt)
1456{ 1534{
1457 struct nfp_net_r_vector *r_vec = rx_ring->r_vec; 1535 struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
1458 struct nfp_net *nn = r_vec->nfp_net; 1536 struct nfp_net *nn = r_vec->nfp_net;
1459 struct pci_dev *pdev = nn->pdev; 1537 struct pci_dev *pdev = nn->pdev;
1460 int sz; 1538 int sz;
1461 1539
1462 rx_ring->cnt = nn->rxd_cnt; 1540 rx_ring->cnt = cnt;
1541 rx_ring->bufsz = fl_bufsz;
1463 1542
1464 rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt; 1543 rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt;
1465 rx_ring->rxds = dma_zalloc_coherent(&pdev->dev, rx_ring->size, 1544 rx_ring->rxds = dma_zalloc_coherent(&pdev->dev, rx_ring->size,
@@ -1472,11 +1551,6 @@ static int nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring)
1472 if (!rx_ring->rxbufs) 1551 if (!rx_ring->rxbufs)
1473 goto err_alloc; 1552 goto err_alloc;
1474 1553
1475 /* Write the DMA address, size and MSI-X info to the device */
1476 nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(rx_ring->idx), rx_ring->dma);
1477 nn_writeb(nn, NFP_NET_CFG_RXR_SZ(rx_ring->idx), ilog2(rx_ring->cnt));
1478 nn_writeb(nn, NFP_NET_CFG_RXR_VEC(rx_ring->idx), r_vec->irq_idx);
1479
1480 nn_dbg(nn, "RxQ%02d: FlQCidx=%02d RxQCidx=%02d cnt=%d dma=%#llx host=%p\n", 1554 nn_dbg(nn, "RxQ%02d: FlQCidx=%02d RxQCidx=%02d cnt=%d dma=%#llx host=%p\n",
1481 rx_ring->idx, rx_ring->fl_qcidx, rx_ring->rx_qcidx, 1555 rx_ring->idx, rx_ring->fl_qcidx, rx_ring->rx_qcidx,
1482 rx_ring->cnt, (unsigned long long)rx_ring->dma, rx_ring->rxds); 1556 rx_ring->cnt, (unsigned long long)rx_ring->dma, rx_ring->rxds);
@@ -1488,91 +1562,109 @@ err_alloc:
1488 return -ENOMEM; 1562 return -ENOMEM;
1489} 1563}
1490 1564
1491static void __nfp_net_free_rings(struct nfp_net *nn, unsigned int n_free) 1565static struct nfp_net_rx_ring *
1566nfp_net_shadow_rx_rings_prepare(struct nfp_net *nn, unsigned int fl_bufsz,
1567 u32 buf_cnt)
1492{ 1568{
1493 struct nfp_net_r_vector *r_vec; 1569 struct nfp_net_rx_ring *rings;
1494 struct msix_entry *entry; 1570 unsigned int r;
1495 1571
1496 while (n_free--) { 1572 rings = kcalloc(nn->num_rx_rings, sizeof(*rings), GFP_KERNEL);
1497 r_vec = &nn->r_vecs[n_free]; 1573 if (!rings)
1498 entry = &nn->irq_entries[r_vec->irq_idx]; 1574 return NULL;
1499 1575
1500 nfp_net_rx_ring_free(r_vec->rx_ring); 1576 for (r = 0; r < nn->num_rx_rings; r++) {
1501 nfp_net_tx_ring_free(r_vec->tx_ring); 1577 nfp_net_rx_ring_init(&rings[r], nn->rx_rings[r].r_vec, r);
1502 1578
1503 irq_set_affinity_hint(entry->vector, NULL); 1579 if (nfp_net_rx_ring_alloc(&rings[r], fl_bufsz, buf_cnt))
1504 free_irq(entry->vector, r_vec); 1580 goto err_free_prev;
1505 1581
1506 netif_napi_del(&r_vec->napi); 1582 if (nfp_net_rx_ring_bufs_alloc(nn, &rings[r]))
1583 goto err_free_ring;
1584 }
1585
1586 return rings;
1587
1588err_free_prev:
1589 while (r--) {
1590 nfp_net_rx_ring_bufs_free(nn, &rings[r]);
1591err_free_ring:
1592 nfp_net_rx_ring_free(&rings[r]);
1507 } 1593 }
1594 kfree(rings);
1595 return NULL;
1508} 1596}
1509 1597
1510/** 1598static struct nfp_net_rx_ring *
1511 * nfp_net_free_rings() - Free all ring resources 1599nfp_net_shadow_rx_rings_swap(struct nfp_net *nn, struct nfp_net_rx_ring *rings)
1512 * @nn: NFP Net device to reconfigure
1513 */
1514static void nfp_net_free_rings(struct nfp_net *nn)
1515{ 1600{
1516 __nfp_net_free_rings(nn, nn->num_r_vecs); 1601 struct nfp_net_rx_ring *old = nn->rx_rings;
1602 unsigned int r;
1603
1604 for (r = 0; r < nn->num_rx_rings; r++)
1605 old[r].r_vec->rx_ring = &rings[r];
1606
1607 nn->rx_rings = rings;
1608 return old;
1517} 1609}
1518 1610
1519/** 1611static void
1520 * nfp_net_alloc_rings() - Allocate resources for RX and TX rings 1612nfp_net_shadow_rx_rings_free(struct nfp_net *nn, struct nfp_net_rx_ring *rings)
1521 * @nn: NFP Net device to reconfigure
1522 *
1523 * Return: 0 on success or negative errno on error.
1524 */
1525static int nfp_net_alloc_rings(struct nfp_net *nn)
1526{ 1613{
1527 struct nfp_net_r_vector *r_vec; 1614 unsigned int r;
1528 struct msix_entry *entry; 1615
1529 int err; 1616 if (!rings)
1530 int r; 1617 return;
1531 1618
1532 for (r = 0; r < nn->num_r_vecs; r++) { 1619 for (r = 0; r < nn->num_r_vecs; r++) {
1533 r_vec = &nn->r_vecs[r]; 1620 nfp_net_rx_ring_bufs_free(nn, &rings[r]);
1534 entry = &nn->irq_entries[r_vec->irq_idx]; 1621 nfp_net_rx_ring_free(&rings[r]);
1535 1622 }
1536 /* Setup NAPI */
1537 netif_napi_add(nn->netdev, &r_vec->napi,
1538 nfp_net_poll, NAPI_POLL_WEIGHT);
1539
1540 snprintf(r_vec->name, sizeof(r_vec->name),
1541 "%s-rxtx-%d", nn->netdev->name, r);
1542 err = request_irq(entry->vector, r_vec->handler, 0,
1543 r_vec->name, r_vec);
1544 if (err) {
1545 nn_dbg(nn, "Error requesting IRQ %d\n", entry->vector);
1546 goto err_napi_del;
1547 }
1548 1623
1549 irq_set_affinity_hint(entry->vector, &r_vec->affinity_mask); 1624 kfree(rings);
1625}
1550 1626
1551 nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", 1627static int
1552 r, entry->vector, entry->entry); 1628nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
1629 int idx)
1630{
1631 struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx];
1632 int err;
1553 1633
1554 /* Allocate TX ring resources */ 1634 r_vec->tx_ring = &nn->tx_rings[idx];
1555 err = nfp_net_tx_ring_alloc(r_vec->tx_ring); 1635 nfp_net_tx_ring_init(r_vec->tx_ring, r_vec, idx);
1556 if (err)
1557 goto err_free_irq;
1558 1636
1559 /* Allocate RX ring resources */ 1637 r_vec->rx_ring = &nn->rx_rings[idx];
1560 err = nfp_net_rx_ring_alloc(r_vec->rx_ring); 1638 nfp_net_rx_ring_init(r_vec->rx_ring, r_vec, idx);
1561 if (err) 1639
1562 goto err_free_tx; 1640 snprintf(r_vec->name, sizeof(r_vec->name),
1641 "%s-rxtx-%d", nn->netdev->name, idx);
1642 err = request_irq(entry->vector, r_vec->handler, 0, r_vec->name, r_vec);
1643 if (err) {
1644 nn_err(nn, "Error requesting IRQ %d\n", entry->vector);
1645 return err;
1563 } 1646 }
1647 disable_irq(entry->vector);
1648
1649 /* Setup NAPI */
1650 netif_napi_add(nn->netdev, &r_vec->napi,
1651 nfp_net_poll, NAPI_POLL_WEIGHT);
1652
1653 irq_set_affinity_hint(entry->vector, &r_vec->affinity_mask);
1654
1655 nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, entry->vector, entry->entry);
1564 1656
1565 return 0; 1657 return 0;
1658}
1659
1660static void
1661nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
1662{
1663 struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx];
1566 1664
1567err_free_tx:
1568 nfp_net_tx_ring_free(r_vec->tx_ring);
1569err_free_irq:
1570 irq_set_affinity_hint(entry->vector, NULL); 1665 irq_set_affinity_hint(entry->vector, NULL);
1571 free_irq(entry->vector, r_vec);
1572err_napi_del:
1573 netif_napi_del(&r_vec->napi); 1666 netif_napi_del(&r_vec->napi);
1574 __nfp_net_free_rings(nn, r); 1667 free_irq(entry->vector, r_vec);
1575 return err;
1576} 1668}
1577 1669
1578/** 1670/**
@@ -1646,6 +1738,17 @@ static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *mac)
1646 get_unaligned_be16(nn->netdev->dev_addr + 4) << 16); 1738 get_unaligned_be16(nn->netdev->dev_addr + 4) << 16);
1647} 1739}
1648 1740
1741static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
1742{
1743 nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), 0);
1744 nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), 0);
1745 nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), 0);
1746
1747 nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), 0);
1748 nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), 0);
1749 nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), 0);
1750}
1751
1649/** 1752/**
1650 * nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP 1753 * nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP
1651 * @nn: NFP Net device to reconfigure 1754 * @nn: NFP Net device to reconfigure
@@ -1653,6 +1756,7 @@ static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *mac)
1653static void nfp_net_clear_config_and_disable(struct nfp_net *nn) 1756static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
1654{ 1757{
1655 u32 new_ctrl, update; 1758 u32 new_ctrl, update;
1759 unsigned int r;
1656 int err; 1760 int err;
1657 1761
1658 new_ctrl = nn->ctrl; 1762 new_ctrl = nn->ctrl;
@@ -1669,79 +1773,40 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
1669 1773
1670 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl); 1774 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
1671 err = nfp_net_reconfig(nn, update); 1775 err = nfp_net_reconfig(nn, update);
1672 if (err) { 1776 if (err)
1673 nn_err(nn, "Could not disable device: %d\n", err); 1777 nn_err(nn, "Could not disable device: %d\n", err);
1674 return; 1778
1779 for (r = 0; r < nn->num_r_vecs; r++) {
1780 nfp_net_rx_ring_reset(nn->r_vecs[r].rx_ring);
1781 nfp_net_tx_ring_reset(nn, nn->r_vecs[r].tx_ring);
1782 nfp_net_vec_clear_ring_data(nn, r);
1675 } 1783 }
1676 1784
1677 nn->ctrl = new_ctrl; 1785 nn->ctrl = new_ctrl;
1678} 1786}
1679 1787
1680/** 1788static void
1681 * nfp_net_start_vec() - Start ring vector 1789nfp_net_vec_write_ring_data(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
1682 * @nn: NFP Net device structure 1790 unsigned int idx)
1683 * @r_vec: Ring vector to be started
1684 */
1685static int nfp_net_start_vec(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
1686{ 1791{
1687 unsigned int irq_vec; 1792 /* Write the DMA address, size and MSI-X info to the device */
1688 int err = 0; 1793 nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), r_vec->rx_ring->dma);
1689 1794 nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(r_vec->rx_ring->cnt));
1690 irq_vec = nn->irq_entries[r_vec->irq_idx].vector; 1795 nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), r_vec->irq_idx);
1691
1692 disable_irq(irq_vec);
1693
1694 err = nfp_net_rx_fill_freelist(r_vec->rx_ring);
1695 if (err) {
1696 nn_err(nn, "RV%02d: couldn't allocate enough buffers\n",
1697 r_vec->irq_idx);
1698 goto out;
1699 }
1700
1701 napi_enable(&r_vec->napi);
1702out:
1703 enable_irq(irq_vec);
1704 1796
1705 return err; 1797 nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), r_vec->tx_ring->dma);
1798 nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(r_vec->tx_ring->cnt));
1799 nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), r_vec->irq_idx);
1706} 1800}
1707 1801
1708static int nfp_net_netdev_open(struct net_device *netdev) 1802static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
1709{ 1803{
1710 struct nfp_net *nn = netdev_priv(netdev); 1804 u32 new_ctrl, update = 0;
1711 int err, r; 1805 unsigned int r;
1712 u32 update = 0; 1806 int err;
1713 u32 new_ctrl;
1714
1715 if (nn->ctrl & NFP_NET_CFG_CTRL_ENABLE) {
1716 nn_err(nn, "Dev is already enabled: 0x%08x\n", nn->ctrl);
1717 return -EBUSY;
1718 }
1719 1807
1720 new_ctrl = nn->ctrl; 1808 new_ctrl = nn->ctrl;
1721 1809
1722 /* Step 1: Allocate resources for rings and the like
1723 * - Request interrupts
1724 * - Allocate RX and TX ring resources
1725 * - Setup initial RSS table
1726 */
1727 err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_EXN, "%s-exn",
1728 nn->exn_name, sizeof(nn->exn_name),
1729 NFP_NET_IRQ_EXN_IDX, nn->exn_handler);
1730 if (err)
1731 return err;
1732
1733 err = nfp_net_alloc_rings(nn);
1734 if (err)
1735 goto err_free_exn;
1736
1737 err = netif_set_real_num_tx_queues(netdev, nn->num_tx_rings);
1738 if (err)
1739 goto err_free_rings;
1740
1741 err = netif_set_real_num_rx_queues(netdev, nn->num_rx_rings);
1742 if (err)
1743 goto err_free_rings;
1744
1745 if (nn->cap & NFP_NET_CFG_CTRL_RSS) { 1810 if (nn->cap & NFP_NET_CFG_CTRL_RSS) {
1746 nfp_net_rss_write_key(nn); 1811 nfp_net_rss_write_key(nn);
1747 nfp_net_rss_write_itbl(nn); 1812 nfp_net_rss_write_itbl(nn);
@@ -1756,22 +1821,18 @@ static int nfp_net_netdev_open(struct net_device *netdev)
1756 update |= NFP_NET_CFG_UPDATE_IRQMOD; 1821 update |= NFP_NET_CFG_UPDATE_IRQMOD;
1757 } 1822 }
1758 1823
1759 /* Step 2: Configure the NFP 1824 for (r = 0; r < nn->num_r_vecs; r++)
1760 * - Enable rings from 0 to tx_rings/rx_rings - 1. 1825 nfp_net_vec_write_ring_data(nn, &nn->r_vecs[r], r);
1761 * - Write MAC address (in case it changed) 1826
1762 * - Set the MTU
1763 * - Set the Freelist buffer size
1764 * - Enable the FW
1765 */
1766 nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->num_tx_rings == 64 ? 1827 nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->num_tx_rings == 64 ?
1767 0xffffffffffffffffULL : ((u64)1 << nn->num_tx_rings) - 1); 1828 0xffffffffffffffffULL : ((u64)1 << nn->num_tx_rings) - 1);
1768 1829
1769 nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->num_rx_rings == 64 ? 1830 nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->num_rx_rings == 64 ?
1770 0xffffffffffffffffULL : ((u64)1 << nn->num_rx_rings) - 1); 1831 0xffffffffffffffffULL : ((u64)1 << nn->num_rx_rings) - 1);
1771 1832
1772 nfp_net_write_mac_addr(nn, netdev->dev_addr); 1833 nfp_net_write_mac_addr(nn, nn->netdev->dev_addr);
1773 1834
1774 nn_writel(nn, NFP_NET_CFG_MTU, netdev->mtu); 1835 nn_writel(nn, NFP_NET_CFG_MTU, nn->netdev->mtu);
1775 nn_writel(nn, NFP_NET_CFG_FLBUFSZ, nn->fl_bufsz); 1836 nn_writel(nn, NFP_NET_CFG_FLBUFSZ, nn->fl_bufsz);
1776 1837
1777 /* Enable device */ 1838 /* Enable device */
@@ -1784,69 +1845,213 @@ static int nfp_net_netdev_open(struct net_device *netdev)
1784 1845
1785 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl); 1846 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
1786 err = nfp_net_reconfig(nn, update); 1847 err = nfp_net_reconfig(nn, update);
1787 if (err)
1788 goto err_clear_config;
1789 1848
1790 nn->ctrl = new_ctrl; 1849 nn->ctrl = new_ctrl;
1791 1850
1851 for (r = 0; r < nn->num_r_vecs; r++)
1852 nfp_net_rx_ring_fill_freelist(nn->r_vecs[r].rx_ring);
1853
1792 /* Since reconfiguration requests while NFP is down are ignored we 1854 /* Since reconfiguration requests while NFP is down are ignored we
1793 * have to wipe the entire VXLAN configuration and reinitialize it. 1855 * have to wipe the entire VXLAN configuration and reinitialize it.
1794 */ 1856 */
1795 if (nn->ctrl & NFP_NET_CFG_CTRL_VXLAN) { 1857 if (nn->ctrl & NFP_NET_CFG_CTRL_VXLAN) {
1796 memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports)); 1858 memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports));
1797 memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt)); 1859 memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt));
1798 vxlan_get_rx_port(netdev); 1860 vxlan_get_rx_port(nn->netdev);
1799 } 1861 }
1800 1862
1801 /* Step 3: Enable for kernel 1863 return err;
1802 * - put some freelist descriptors on each RX ring 1864}
1803 * - enable NAPI on each ring 1865
1804 * - enable all TX queues 1866/**
1805 * - set link state 1867 * nfp_net_set_config_and_enable() - Write control BAR and enable NFP
1806 */ 1868 * @nn: NFP Net device to reconfigure
1869 */
1870static int nfp_net_set_config_and_enable(struct nfp_net *nn)
1871{
1872 int err;
1873
1874 err = __nfp_net_set_config_and_enable(nn);
1875 if (err)
1876 nfp_net_clear_config_and_disable(nn);
1877
1878 return err;
1879}
1880
1881/**
1882 * nfp_net_open_stack() - Start the device from stack's perspective
1883 * @nn: NFP Net device to reconfigure
1884 */
1885static void nfp_net_open_stack(struct nfp_net *nn)
1886{
1887 unsigned int r;
1888
1807 for (r = 0; r < nn->num_r_vecs; r++) { 1889 for (r = 0; r < nn->num_r_vecs; r++) {
1808 err = nfp_net_start_vec(nn, &nn->r_vecs[r]); 1890 napi_enable(&nn->r_vecs[r].napi);
1809 if (err) 1891 enable_irq(nn->irq_entries[nn->r_vecs[r].irq_idx].vector);
1810 goto err_disable_napi;
1811 } 1892 }
1812 1893
1813 netif_tx_wake_all_queues(netdev); 1894 netif_tx_wake_all_queues(nn->netdev);
1895
1896 enable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector);
1897 nfp_net_read_link_status(nn);
1898}
1814 1899
1900static int nfp_net_netdev_open(struct net_device *netdev)
1901{
1902 struct nfp_net *nn = netdev_priv(netdev);
1903 int err, r;
1904
1905 if (nn->ctrl & NFP_NET_CFG_CTRL_ENABLE) {
1906 nn_err(nn, "Dev is already enabled: 0x%08x\n", nn->ctrl);
1907 return -EBUSY;
1908 }
1909
1910 /* Step 1: Allocate resources for rings and the like
1911 * - Request interrupts
1912 * - Allocate RX and TX ring resources
1913 * - Setup initial RSS table
1914 */
1915 err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_EXN, "%s-exn",
1916 nn->exn_name, sizeof(nn->exn_name),
1917 NFP_NET_IRQ_EXN_IDX, nn->exn_handler);
1918 if (err)
1919 return err;
1815 err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_LSC, "%s-lsc", 1920 err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_LSC, "%s-lsc",
1816 nn->lsc_name, sizeof(nn->lsc_name), 1921 nn->lsc_name, sizeof(nn->lsc_name),
1817 NFP_NET_IRQ_LSC_IDX, nn->lsc_handler); 1922 NFP_NET_IRQ_LSC_IDX, nn->lsc_handler);
1818 if (err) 1923 if (err)
1819 goto err_stop_tx; 1924 goto err_free_exn;
1820 nfp_net_read_link_status(nn); 1925 disable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector);
1821 1926
1822 return 0; 1927 nn->rx_rings = kcalloc(nn->num_rx_rings, sizeof(*nn->rx_rings),
1928 GFP_KERNEL);
1929 if (!nn->rx_rings)
1930 goto err_free_lsc;
1931 nn->tx_rings = kcalloc(nn->num_tx_rings, sizeof(*nn->tx_rings),
1932 GFP_KERNEL);
1933 if (!nn->tx_rings)
1934 goto err_free_rx_rings;
1823 1935
1824err_stop_tx: 1936 for (r = 0; r < nn->num_r_vecs; r++) {
1825 netif_tx_disable(netdev); 1937 err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
1826 for (r = 0; r < nn->num_r_vecs; r++) 1938 if (err)
1827 nfp_net_tx_flush(nn->r_vecs[r].tx_ring); 1939 goto err_free_prev_vecs;
1828err_disable_napi: 1940
1829 while (r--) { 1941 err = nfp_net_tx_ring_alloc(nn->r_vecs[r].tx_ring, nn->txd_cnt);
1830 napi_disable(&nn->r_vecs[r].napi); 1942 if (err)
1831 nfp_net_rx_flush(nn->r_vecs[r].rx_ring); 1943 goto err_cleanup_vec_p;
1944
1945 err = nfp_net_rx_ring_alloc(nn->r_vecs[r].rx_ring,
1946 nn->fl_bufsz, nn->rxd_cnt);
1947 if (err)
1948 goto err_free_tx_ring_p;
1949
1950 err = nfp_net_rx_ring_bufs_alloc(nn, nn->r_vecs[r].rx_ring);
1951 if (err)
1952 goto err_flush_rx_ring_p;
1832 } 1953 }
1833err_clear_config: 1954
1834 nfp_net_clear_config_and_disable(nn); 1955 err = netif_set_real_num_tx_queues(netdev, nn->num_tx_rings);
1956 if (err)
1957 goto err_free_rings;
1958
1959 err = netif_set_real_num_rx_queues(netdev, nn->num_rx_rings);
1960 if (err)
1961 goto err_free_rings;
1962
1963 /* Step 2: Configure the NFP
1964 * - Enable rings from 0 to tx_rings/rx_rings - 1.
1965 * - Write MAC address (in case it changed)
1966 * - Set the MTU
1967 * - Set the Freelist buffer size
1968 * - Enable the FW
1969 */
1970 err = nfp_net_set_config_and_enable(nn);
1971 if (err)
1972 goto err_free_rings;
1973
1974 /* Step 3: Enable for kernel
1975 * - put some freelist descriptors on each RX ring
1976 * - enable NAPI on each ring
1977 * - enable all TX queues
1978 * - set link state
1979 */
1980 nfp_net_open_stack(nn);
1981
1982 return 0;
1983
1835err_free_rings: 1984err_free_rings:
1836 nfp_net_free_rings(nn); 1985 r = nn->num_r_vecs;
1986err_free_prev_vecs:
1987 while (r--) {
1988 nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring);
1989err_flush_rx_ring_p:
1990 nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring);
1991err_free_tx_ring_p:
1992 nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring);
1993err_cleanup_vec_p:
1994 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
1995 }
1996 kfree(nn->tx_rings);
1997err_free_rx_rings:
1998 kfree(nn->rx_rings);
1999err_free_lsc:
2000 nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
1837err_free_exn: 2001err_free_exn:
1838 nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX); 2002 nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
1839 return err; 2003 return err;
1840} 2004}
1841 2005
1842/** 2006/**
2007 * nfp_net_close_stack() - Quiesce the stack (part of close)
2008 * @nn: NFP Net device to reconfigure
2009 */
2010static void nfp_net_close_stack(struct nfp_net *nn)
2011{
2012 unsigned int r;
2013
2014 disable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector);
2015 netif_carrier_off(nn->netdev);
2016 nn->link_up = false;
2017
2018 for (r = 0; r < nn->num_r_vecs; r++) {
2019 disable_irq(nn->irq_entries[nn->r_vecs[r].irq_idx].vector);
2020 napi_disable(&nn->r_vecs[r].napi);
2021 }
2022
2023 netif_tx_disable(nn->netdev);
2024}
2025
2026/**
2027 * nfp_net_close_free_all() - Free all runtime resources
2028 * @nn: NFP Net device to reconfigure
2029 */
2030static void nfp_net_close_free_all(struct nfp_net *nn)
2031{
2032 unsigned int r;
2033
2034 for (r = 0; r < nn->num_r_vecs; r++) {
2035 nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring);
2036 nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring);
2037 nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring);
2038 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
2039 }
2040
2041 kfree(nn->rx_rings);
2042 kfree(nn->tx_rings);
2043
2044 nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
2045 nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
2046}
2047
2048/**
1843 * nfp_net_netdev_close() - Called when the device is downed 2049 * nfp_net_netdev_close() - Called when the device is downed
1844 * @netdev: netdev structure 2050 * @netdev: netdev structure
1845 */ 2051 */
1846static int nfp_net_netdev_close(struct net_device *netdev) 2052static int nfp_net_netdev_close(struct net_device *netdev)
1847{ 2053{
1848 struct nfp_net *nn = netdev_priv(netdev); 2054 struct nfp_net *nn = netdev_priv(netdev);
1849 int r;
1850 2055
1851 if (!(nn->ctrl & NFP_NET_CFG_CTRL_ENABLE)) { 2056 if (!(nn->ctrl & NFP_NET_CFG_CTRL_ENABLE)) {
1852 nn_err(nn, "Dev is not up: 0x%08x\n", nn->ctrl); 2057 nn_err(nn, "Dev is not up: 0x%08x\n", nn->ctrl);
@@ -1855,14 +2060,7 @@ static int nfp_net_netdev_close(struct net_device *netdev)
1855 2060
1856 /* Step 1: Disable RX and TX rings from the Linux kernel perspective 2061 /* Step 1: Disable RX and TX rings from the Linux kernel perspective
1857 */ 2062 */
1858 nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); 2063 nfp_net_close_stack(nn);
1859 netif_carrier_off(netdev);
1860 nn->link_up = false;
1861
1862 for (r = 0; r < nn->num_r_vecs; r++)
1863 napi_disable(&nn->r_vecs[r].napi);
1864
1865 netif_tx_disable(netdev);
1866 2064
1867 /* Step 2: Tell NFP 2065 /* Step 2: Tell NFP
1868 */ 2066 */
@@ -1870,13 +2068,7 @@ static int nfp_net_netdev_close(struct net_device *netdev)
1870 2068
1871 /* Step 3: Free resources 2069 /* Step 3: Free resources
1872 */ 2070 */
1873 for (r = 0; r < nn->num_r_vecs; r++) { 2071 nfp_net_close_free_all(nn);
1874 nfp_net_rx_flush(nn->r_vecs[r].rx_ring);
1875 nfp_net_tx_flush(nn->r_vecs[r].tx_ring);
1876 }
1877
1878 nfp_net_free_rings(nn);
1879 nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
1880 2072
1881 nn_dbg(nn, "%s down", netdev->name); 2073 nn_dbg(nn, "%s down", netdev->name);
1882 return 0; 2074 return 0;
@@ -1910,29 +2102,132 @@ static void nfp_net_set_rx_mode(struct net_device *netdev)
1910 2102
1911static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu) 2103static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
1912{ 2104{
2105 unsigned int old_mtu, old_fl_bufsz, new_fl_bufsz;
1913 struct nfp_net *nn = netdev_priv(netdev); 2106 struct nfp_net *nn = netdev_priv(netdev);
1914 u32 tmp; 2107 struct nfp_net_rx_ring *tmp_rings;
1915 2108 int err;
1916 nn_dbg(nn, "New MTU = %d\n", new_mtu);
1917 2109
1918 if (new_mtu < 68 || new_mtu > nn->max_mtu) { 2110 if (new_mtu < 68 || new_mtu > nn->max_mtu) {
1919 nn_err(nn, "New MTU (%d) is not valid\n", new_mtu); 2111 nn_err(nn, "New MTU (%d) is not valid\n", new_mtu);
1920 return -EINVAL; 2112 return -EINVAL;
1921 } 2113 }
1922 2114
2115 old_mtu = netdev->mtu;
2116 old_fl_bufsz = nn->fl_bufsz;
2117 new_fl_bufsz = NFP_NET_MAX_PREPEND + ETH_HLEN + VLAN_HLEN * 2 + new_mtu;
2118
2119 if (!netif_running(netdev)) {
2120 netdev->mtu = new_mtu;
2121 nn->fl_bufsz = new_fl_bufsz;
2122 return 0;
2123 }
2124
2125 /* Prepare new rings */
2126 tmp_rings = nfp_net_shadow_rx_rings_prepare(nn, new_fl_bufsz,
2127 nn->rxd_cnt);
2128 if (!tmp_rings)
2129 return -ENOMEM;
2130
2131 /* Stop device, swap in new rings, try to start the firmware */
2132 nfp_net_close_stack(nn);
2133 nfp_net_clear_config_and_disable(nn);
2134
2135 tmp_rings = nfp_net_shadow_rx_rings_swap(nn, tmp_rings);
2136
1923 netdev->mtu = new_mtu; 2137 netdev->mtu = new_mtu;
2138 nn->fl_bufsz = new_fl_bufsz;
1924 2139
1925 /* Freelist buffer size rounded up to the nearest 1K */ 2140 err = nfp_net_set_config_and_enable(nn);
1926 tmp = new_mtu + ETH_HLEN + VLAN_HLEN + NFP_NET_MAX_PREPEND; 2141 if (err) {
1927 nn->fl_bufsz = roundup(tmp, 1024); 2142 const int err_new = err;
2143
2144 /* Try with old configuration and old rings */
2145 tmp_rings = nfp_net_shadow_rx_rings_swap(nn, tmp_rings);
2146
2147 netdev->mtu = old_mtu;
2148 nn->fl_bufsz = old_fl_bufsz;
1928 2149
1929 /* restart if running */ 2150 err = __nfp_net_set_config_and_enable(nn);
1930 if (netif_running(netdev)) { 2151 if (err)
1931 nfp_net_netdev_close(netdev); 2152 nn_err(nn, "Can't restore MTU - FW communication failed (%d,%d)\n",
1932 nfp_net_netdev_open(netdev); 2153 err_new, err);
1933 } 2154 }
1934 2155
1935 return 0; 2156 nfp_net_shadow_rx_rings_free(nn, tmp_rings);
2157
2158 nfp_net_open_stack(nn);
2159
2160 return err;
2161}
2162
2163int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
2164{
2165 struct nfp_net_tx_ring *tx_rings = NULL;
2166 struct nfp_net_rx_ring *rx_rings = NULL;
2167 u32 old_rxd_cnt, old_txd_cnt;
2168 int err;
2169
2170 if (!netif_running(nn->netdev)) {
2171 nn->rxd_cnt = rxd_cnt;
2172 nn->txd_cnt = txd_cnt;
2173 return 0;
2174 }
2175
2176 old_rxd_cnt = nn->rxd_cnt;
2177 old_txd_cnt = nn->txd_cnt;
2178
2179 /* Prepare new rings */
2180 if (nn->rxd_cnt != rxd_cnt) {
2181 rx_rings = nfp_net_shadow_rx_rings_prepare(nn, nn->fl_bufsz,
2182 rxd_cnt);
2183 if (!rx_rings)
2184 return -ENOMEM;
2185 }
2186 if (nn->txd_cnt != txd_cnt) {
2187 tx_rings = nfp_net_shadow_tx_rings_prepare(nn, txd_cnt);
2188 if (!tx_rings) {
2189 nfp_net_shadow_rx_rings_free(nn, rx_rings);
2190 return -ENOMEM;
2191 }
2192 }
2193
2194 /* Stop device, swap in new rings, try to start the firmware */
2195 nfp_net_close_stack(nn);
2196 nfp_net_clear_config_and_disable(nn);
2197
2198 if (rx_rings)
2199 rx_rings = nfp_net_shadow_rx_rings_swap(nn, rx_rings);
2200 if (tx_rings)
2201 tx_rings = nfp_net_shadow_tx_rings_swap(nn, tx_rings);
2202
2203 nn->rxd_cnt = rxd_cnt;
2204 nn->txd_cnt = txd_cnt;
2205
2206 err = nfp_net_set_config_and_enable(nn);
2207 if (err) {
2208 const int err_new = err;
2209
2210 /* Try with old configuration and old rings */
2211 if (rx_rings)
2212 rx_rings = nfp_net_shadow_rx_rings_swap(nn, rx_rings);
2213 if (tx_rings)
2214 tx_rings = nfp_net_shadow_tx_rings_swap(nn, tx_rings);
2215
2216 nn->rxd_cnt = old_rxd_cnt;
2217 nn->txd_cnt = old_txd_cnt;
2218
2219 err = __nfp_net_set_config_and_enable(nn);
2220 if (err)
2221 nn_err(nn, "Can't restore ring config - FW communication failed (%d,%d)\n",
2222 err_new, err);
2223 }
2224
2225 nfp_net_shadow_rx_rings_free(nn, rx_rings);
2226 nfp_net_shadow_tx_rings_free(nn, tx_rings);
2227
2228 nfp_net_open_stack(nn);
2229
2230 return err;
1936} 2231}
1937 2232
1938static struct rtnl_link_stats64 *nfp_net_stat64(struct net_device *netdev, 2233static struct rtnl_link_stats64 *nfp_net_stat64(struct net_device *netdev,
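The MTU and ring-size paths above both follow the same prepare/swap/commit-or-rollback idiom: allocate a replacement ("shadow") set of rings up front, quiesce the device, swap the new set in, and if re-enabling the firmware fails, swap the old set back before freeing whichever set ended up unused. Below is a minimal userspace sketch of that idiom; the names (ring_set, prepare_rings, enable_device) are illustrative stand-ins, not driver symbols.

#include <stdio.h>
#include <stdlib.h>

struct ring_set {
	unsigned int cnt;	/* descriptor count this set was built for */
	int *descs;		/* stand-in for DMA descriptor memory */
};

/* Allocate a candidate ring set without touching the live one. */
static struct ring_set *prepare_rings(unsigned int cnt)
{
	struct ring_set *r = malloc(sizeof(*r));

	if (!r)
		return NULL;
	r->cnt = cnt;
	r->descs = calloc(cnt, sizeof(*r->descs));
	if (!r->descs) {
		free(r);
		return NULL;
	}
	return r;
}

static void free_rings(struct ring_set *r)
{
	if (!r)
		return;
	free(r->descs);
	free(r);
}

/* Swap the live set with the shadow set and return the old one. */
static struct ring_set *swap_rings(struct ring_set **live, struct ring_set *shadow)
{
	struct ring_set *old = *live;

	*live = shadow;
	return old;
}

/* 0 = configuration accepted, nonzero = rejected (simulated). */
static int enable_device(const struct ring_set *live)
{
	return live->cnt > 4096 ? -1 : 0;
}

int main(void)
{
	struct ring_set *live = prepare_rings(1024);
	struct ring_set *shadow = prepare_rings(2048);
	int err;

	if (!live || !shadow)
		return 1;

	shadow = swap_rings(&live, shadow);	/* new set goes live */
	err = enable_device(live);
	if (err)				/* roll back on failure */
		shadow = swap_rings(&live, shadow);

	free_rings(shadow);			/* whichever set is now unused */
	printf("running with %u descriptors (err=%d)\n", live->cnt, err);
	free_rings(live);
	return 0;
}

Either way the function ends up freeing exactly one ring set and running on the other, which is why the swap helper returns the displaced set instead of freeing it itself.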
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
index 4c97c713121c..f86a1f13d27b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
@@ -40,8 +40,9 @@ static struct dentry *nfp_dir;
40 40
41static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data) 41static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data)
42{ 42{
43 struct nfp_net_rx_ring *rx_ring = file->private;
44 int fl_rd_p, fl_wr_p, rx_rd_p, rx_wr_p, rxd_cnt; 43 int fl_rd_p, fl_wr_p, rx_rd_p, rx_wr_p, rxd_cnt;
44 struct nfp_net_r_vector *r_vec = file->private;
45 struct nfp_net_rx_ring *rx_ring;
45 struct nfp_net_rx_desc *rxd; 46 struct nfp_net_rx_desc *rxd;
46 struct sk_buff *skb; 47 struct sk_buff *skb;
47 struct nfp_net *nn; 48 struct nfp_net *nn;
@@ -49,9 +50,10 @@ static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data)
49 50
50 rtnl_lock(); 51 rtnl_lock();
51 52
52 if (!rx_ring->r_vec || !rx_ring->r_vec->nfp_net) 53 if (!r_vec->nfp_net || !r_vec->rx_ring)
53 goto out; 54 goto out;
54 nn = rx_ring->r_vec->nfp_net; 55 nn = r_vec->nfp_net;
56 rx_ring = r_vec->rx_ring;
55 if (!netif_running(nn->netdev)) 57 if (!netif_running(nn->netdev))
56 goto out; 58 goto out;
57 59
@@ -115,7 +117,8 @@ static const struct file_operations nfp_rx_q_fops = {
115 117
116static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data) 118static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data)
117{ 119{
118 struct nfp_net_tx_ring *tx_ring = file->private; 120 struct nfp_net_r_vector *r_vec = file->private;
121 struct nfp_net_tx_ring *tx_ring;
119 struct nfp_net_tx_desc *txd; 122 struct nfp_net_tx_desc *txd;
120 int d_rd_p, d_wr_p, txd_cnt; 123 int d_rd_p, d_wr_p, txd_cnt;
121 struct sk_buff *skb; 124 struct sk_buff *skb;
@@ -124,9 +127,10 @@ static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data)
124 127
125 rtnl_lock(); 128 rtnl_lock();
126 129
127 if (!tx_ring->r_vec || !tx_ring->r_vec->nfp_net) 130 if (!r_vec->nfp_net || !r_vec->tx_ring)
128 goto out; 131 goto out;
129 nn = tx_ring->r_vec->nfp_net; 132 nn = r_vec->nfp_net;
133 tx_ring = r_vec->tx_ring;
130 if (!netif_running(nn->netdev)) 134 if (!netif_running(nn->netdev))
131 goto out; 135 goto out;
132 136
@@ -207,13 +211,13 @@ void nfp_net_debugfs_adapter_add(struct nfp_net *nn)
207 for (i = 0; i < nn->num_rx_rings; i++) { 211 for (i = 0; i < nn->num_rx_rings; i++) {
208 sprintf(int_name, "%d", i); 212 sprintf(int_name, "%d", i);
209 debugfs_create_file(int_name, S_IRUSR, rx, 213 debugfs_create_file(int_name, S_IRUSR, rx,
210 &nn->rx_rings[i], &nfp_rx_q_fops); 214 &nn->r_vecs[i], &nfp_rx_q_fops);
211 } 215 }
212 216
213 for (i = 0; i < nn->num_tx_rings; i++) { 217 for (i = 0; i < nn->num_tx_rings; i++) {
214 sprintf(int_name, "%d", i); 218 sprintf(int_name, "%d", i);
215 debugfs_create_file(int_name, S_IRUSR, tx, 219 debugfs_create_file(int_name, S_IRUSR, tx,
216 &nn->tx_rings[i], &nfp_tx_q_fops); 220 &nn->r_vecs[i], &nfp_tx_q_fops);
217 } 221 }
218} 222}
219 223
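The debugfs hunk switches the file's private pointer from the ring itself to the owning interrupt vector and re-resolves r_vec->rx_ring / r_vec->tx_ring on every read, presumably because the rings can now be reallocated at runtime by the MTU and ring-size paths above, so a cached ring pointer can go stale while the per-vector structure stays put. A hedged sketch of that indirection with invented names:

#include <stdio.h>
#include <stdlib.h>

struct ring { unsigned int cnt; };

struct vec {
	struct ring *rx_ring;	/* may be swapped for a new allocation */
};

/* Reader keeps a pointer to the stable parent, not to the ring. */
static void dump_ring(const struct vec *v)
{
	const struct ring *r = v->rx_ring;	/* re-resolve on each call */

	if (!r) {
		puts("no ring attached");
		return;
	}
	printf("ring with %u descriptors\n", r->cnt);
}

int main(void)
{
	struct ring *old = malloc(sizeof(*old));
	struct ring *fresh = malloc(sizeof(*fresh));
	struct vec v = { .rx_ring = old };

	old->cnt = 1024;
	fresh->cnt = 4096;

	dump_ring(&v);		/* sees the old ring */
	v.rx_ring = fresh;	/* rings reallocated, e.g. after an MTU change */
	free(old);
	dump_ring(&v);		/* still safe: no stale ring pointer was cached */

	free(fresh);
	return 0;
}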
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 9a4084a68db5..ccfef1f17627 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -153,37 +153,25 @@ static int nfp_net_set_ringparam(struct net_device *netdev,
153 struct nfp_net *nn = netdev_priv(netdev); 153 struct nfp_net *nn = netdev_priv(netdev);
154 u32 rxd_cnt, txd_cnt; 154 u32 rxd_cnt, txd_cnt;
155 155
156 if (netif_running(netdev)) {
157 /* Some NIC drivers allow reconfiguration on the fly,
158 * some down the interface, change and then up it
159 * again. For now we don't allow changes when the
160 * device is up.
161 */
162 nn_warn(nn, "Can't change rings while device is up\n");
163 return -EBUSY;
164 }
165
166 /* We don't have separate queues/rings for small/large frames. */ 156 /* We don't have separate queues/rings for small/large frames. */
167 if (ring->rx_mini_pending || ring->rx_jumbo_pending) 157 if (ring->rx_mini_pending || ring->rx_jumbo_pending)
168 return -EINVAL; 158 return -EINVAL;
169 159
170 /* Round up to supported values */ 160 /* Round up to supported values */
171 rxd_cnt = roundup_pow_of_two(ring->rx_pending); 161 rxd_cnt = roundup_pow_of_two(ring->rx_pending);
172 rxd_cnt = max_t(u32, rxd_cnt, NFP_NET_MIN_RX_DESCS);
173 rxd_cnt = min_t(u32, rxd_cnt, NFP_NET_MAX_RX_DESCS);
174
175 txd_cnt = roundup_pow_of_two(ring->tx_pending); 162 txd_cnt = roundup_pow_of_two(ring->tx_pending);
176 txd_cnt = max_t(u32, txd_cnt, NFP_NET_MIN_TX_DESCS);
177 txd_cnt = min_t(u32, txd_cnt, NFP_NET_MAX_TX_DESCS);
178 163
179 if (nn->rxd_cnt != rxd_cnt || nn->txd_cnt != txd_cnt) 164 if (rxd_cnt < NFP_NET_MIN_RX_DESCS || rxd_cnt > NFP_NET_MAX_RX_DESCS ||
180 nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n", 165 txd_cnt < NFP_NET_MIN_TX_DESCS || txd_cnt > NFP_NET_MAX_TX_DESCS)
181 nn->rxd_cnt, rxd_cnt, nn->txd_cnt, txd_cnt); 166 return -EINVAL;
182 167
183 nn->rxd_cnt = rxd_cnt; 168 if (nn->rxd_cnt == rxd_cnt && nn->txd_cnt == txd_cnt)
184 nn->txd_cnt = txd_cnt; 169 return 0;
185 170
186 return 0; 171 nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n",
172 nn->rxd_cnt, rxd_cnt, nn->txd_cnt, txd_cnt);
173
174 return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt);
187} 175}
188 176
189static void nfp_net_get_strings(struct net_device *netdev, 177static void nfp_net_get_strings(struct net_device *netdev,
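With runtime resizing in place, nfp_net_set_ringparam no longer refuses changes on a running device; it rounds the request up to a power of two, rejects anything outside the supported window instead of silently clamping, short-circuits no-op requests, and hands the rest to nfp_net_set_ring_size(). A small standalone sketch of the round-up-then-validate step; the MIN_DESCS/MAX_DESCS bounds are made-up values, not the driver's limits.

#include <stdio.h>
#include <errno.h>

#define MIN_DESCS 256u		/* illustrative limits only */
#define MAX_DESCS 32768u

/* Round up to the next power of two (v != 0, fits in 32 bits). */
static unsigned int roundup_pow_of_two32(unsigned int v)
{
	v--;
	v |= v >> 1;
	v |= v >> 2;
	v |= v >> 4;
	v |= v >> 8;
	v |= v >> 16;
	return v + 1;
}

static int check_ring_request(unsigned int pending, unsigned int *out)
{
	unsigned int cnt = roundup_pow_of_two32(pending);

	if (cnt < MIN_DESCS || cnt > MAX_DESCS)
		return -EINVAL;	/* out of range: reject, don't clamp */
	*out = cnt;
	return 0;
}

int main(void)
{
	unsigned int cnt;
	unsigned int req[] = { 300, 4096, 100000 };

	for (unsigned int i = 0; i < 3; i++) {
		int err = check_ring_request(req[i], &cnt);

		if (err)
			printf("request %u rejected (%d)\n", req[i], err);
		else
			printf("request %u -> %u descriptors\n", req[i], cnt);
	}
	return 0;
}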
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index ffd0accc2ec9..2017b0121f5f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -2750,7 +2750,7 @@ void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
2750int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 2750int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
2751 enum qed_int_mode int_mode) 2751 enum qed_int_mode int_mode)
2752{ 2752{
2753 int rc; 2753 int rc = 0;
2754 2754
2755 /* Configure AEU signal change to produce attentions */ 2755 /* Configure AEU signal change to produce attentions */
2756 qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0); 2756 qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
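The qed change initialises rc because the surrounding function (not shown in full here) only assigns rc on some interrupt-mode paths, so the old code could return an indeterminate value, and newer compilers warn about exactly that. A trimmed illustration of the hazard, not the driver's actual control flow:

#include <stdio.h>

enum int_mode { INT_MODE_MSIX, INT_MODE_INTA, INT_MODE_POLL };

static int enable_interrupts(enum int_mode mode)
{
	int rc = 0;	/* without this, the POLL case returns an indeterminate value */

	switch (mode) {
	case INT_MODE_MSIX:
		rc = 1;		/* pretend some setup ran and reported status */
		break;
	case INT_MODE_INTA:
		rc = 2;
		break;
	case INT_MODE_POLL:
		/* nothing to request: no assignment to rc on this path */
		break;
	}
	return rc;
}

int main(void)
{
	printf("poll mode rc = %d\n", enable_interrupts(INT_MODE_POLL));
	return 0;
}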
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index ef332708e5f2..6d31f92ef2b6 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -18,7 +18,7 @@
18 */ 18 */
19#define DRV_NAME "qlge" 19#define DRV_NAME "qlge"
20#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " 20#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
21#define DRV_VERSION "1.00.00.34" 21#define DRV_VERSION "1.00.00.35"
22 22
23#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ 23#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
24 24
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index b2160d1b9c71..5c1624147778 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -157,6 +157,7 @@ enum ravb_reg {
157 TIC = 0x0378, 157 TIC = 0x0378,
158 TIS = 0x037C, 158 TIS = 0x037C,
159 ISS = 0x0380, 159 ISS = 0x0380,
160 CIE = 0x0384, /* R-Car Gen3 only */
160 GCCR = 0x0390, 161 GCCR = 0x0390,
161 GMTT = 0x0394, 162 GMTT = 0x0394,
162 GPTC = 0x0398, 163 GPTC = 0x0398,
@@ -170,6 +171,15 @@ enum ravb_reg {
170 GCT0 = 0x03B8, 171 GCT0 = 0x03B8,
171 GCT1 = 0x03BC, 172 GCT1 = 0x03BC,
172 GCT2 = 0x03C0, 173 GCT2 = 0x03C0,
174 GIE = 0x03CC, /* R-Car Gen3 only */
175 GID = 0x03D0, /* R-Car Gen3 only */
176 DIL = 0x0440, /* R-Car Gen3 only */
177 RIE0 = 0x0460, /* R-Car Gen3 only */
178 RID0 = 0x0464, /* R-Car Gen3 only */
179 RIE2 = 0x0470, /* R-Car Gen3 only */
180 RID2 = 0x0474, /* R-Car Gen3 only */
181 TIE = 0x0478, /* R-Car Gen3 only */
182 TID = 0x047c, /* R-Car Gen3 only */
173 183
174 /* E-MAC registers */ 184 /* E-MAC registers */
175 ECMR = 0x0500, 185 ECMR = 0x0500,
@@ -556,6 +566,16 @@ enum ISS_BIT {
556 ISS_DPS15 = 0x80000000, 566 ISS_DPS15 = 0x80000000,
557}; 567};
558 568
569/* CIE (R-Car Gen3 only) */
570enum CIE_BIT {
571 CIE_CRIE = 0x00000001,
572 CIE_CTIE = 0x00000100,
573 CIE_RQFM = 0x00010000,
574 CIE_CL0M = 0x00020000,
575 CIE_RFWL = 0x00040000,
576 CIE_RFFL = 0x00080000,
577};
578
559/* GCCR */ 579/* GCCR */
560enum GCCR_BIT { 580enum GCCR_BIT {
561 GCCR_TCR = 0x00000003, 581 GCCR_TCR = 0x00000003,
@@ -592,6 +612,188 @@ enum GIS_BIT {
592 GIS_PTMF = 0x00000004, 612 GIS_PTMF = 0x00000004,
593}; 613};
594 614
615/* GIE (R-Car Gen3 only) */
616enum GIE_BIT {
617 GIE_PTCS = 0x00000001,
618 GIE_PTOS = 0x00000002,
619 GIE_PTMS0 = 0x00000004,
620 GIE_PTMS1 = 0x00000008,
621 GIE_PTMS2 = 0x00000010,
622 GIE_PTMS3 = 0x00000020,
623 GIE_PTMS4 = 0x00000040,
624 GIE_PTMS5 = 0x00000080,
625 GIE_PTMS6 = 0x00000100,
626 GIE_PTMS7 = 0x00000200,
627 GIE_ATCS0 = 0x00010000,
628 GIE_ATCS1 = 0x00020000,
629 GIE_ATCS2 = 0x00040000,
630 GIE_ATCS3 = 0x00080000,
631 GIE_ATCS4 = 0x00100000,
632 GIE_ATCS5 = 0x00200000,
633 GIE_ATCS6 = 0x00400000,
634 GIE_ATCS7 = 0x00800000,
635 GIE_ATCS8 = 0x01000000,
636 GIE_ATCS9 = 0x02000000,
637 GIE_ATCS10 = 0x04000000,
638 GIE_ATCS11 = 0x08000000,
639 GIE_ATCS12 = 0x10000000,
640 GIE_ATCS13 = 0x20000000,
641 GIE_ATCS14 = 0x40000000,
642 GIE_ATCS15 = 0x80000000,
643};
644
645/* GID (R-Car Gen3 only) */
646enum GID_BIT {
647 GID_PTCD = 0x00000001,
648 GID_PTOD = 0x00000002,
649 GID_PTMD0 = 0x00000004,
650 GID_PTMD1 = 0x00000008,
651 GID_PTMD2 = 0x00000010,
652 GID_PTMD3 = 0x00000020,
653 GID_PTMD4 = 0x00000040,
654 GID_PTMD5 = 0x00000080,
655 GID_PTMD6 = 0x00000100,
656 GID_PTMD7 = 0x00000200,
657 GID_ATCD0 = 0x00010000,
658 GID_ATCD1 = 0x00020000,
659 GID_ATCD2 = 0x00040000,
660 GID_ATCD3 = 0x00080000,
661 GID_ATCD4 = 0x00100000,
662 GID_ATCD5 = 0x00200000,
663 GID_ATCD6 = 0x00400000,
664 GID_ATCD7 = 0x00800000,
665 GID_ATCD8 = 0x01000000,
666 GID_ATCD9 = 0x02000000,
667 GID_ATCD10 = 0x04000000,
668 GID_ATCD11 = 0x08000000,
669 GID_ATCD12 = 0x10000000,
670 GID_ATCD13 = 0x20000000,
671 GID_ATCD14 = 0x40000000,
672 GID_ATCD15 = 0x80000000,
673};
674
675/* RIE0 (R-Car Gen3 only) */
676enum RIE0_BIT {
677 RIE0_FRS0 = 0x00000001,
678 RIE0_FRS1 = 0x00000002,
679 RIE0_FRS2 = 0x00000004,
680 RIE0_FRS3 = 0x00000008,
681 RIE0_FRS4 = 0x00000010,
682 RIE0_FRS5 = 0x00000020,
683 RIE0_FRS6 = 0x00000040,
684 RIE0_FRS7 = 0x00000080,
685 RIE0_FRS8 = 0x00000100,
686 RIE0_FRS9 = 0x00000200,
687 RIE0_FRS10 = 0x00000400,
688 RIE0_FRS11 = 0x00000800,
689 RIE0_FRS12 = 0x00001000,
690 RIE0_FRS13 = 0x00002000,
691 RIE0_FRS14 = 0x00004000,
692 RIE0_FRS15 = 0x00008000,
693 RIE0_FRS16 = 0x00010000,
694 RIE0_FRS17 = 0x00020000,
695};
696
697/* RID0 (R-Car Gen3 only) */
698enum RID0_BIT {
699 RID0_FRD0 = 0x00000001,
700 RID0_FRD1 = 0x00000002,
701 RID0_FRD2 = 0x00000004,
702 RID0_FRD3 = 0x00000008,
703 RID0_FRD4 = 0x00000010,
704 RID0_FRD5 = 0x00000020,
705 RID0_FRD6 = 0x00000040,
706 RID0_FRD7 = 0x00000080,
707 RID0_FRD8 = 0x00000100,
708 RID0_FRD9 = 0x00000200,
709 RID0_FRD10 = 0x00000400,
710 RID0_FRD11 = 0x00000800,
711 RID0_FRD12 = 0x00001000,
712 RID0_FRD13 = 0x00002000,
713 RID0_FRD14 = 0x00004000,
714 RID0_FRD15 = 0x00008000,
715 RID0_FRD16 = 0x00010000,
716 RID0_FRD17 = 0x00020000,
717};
718
719/* RIE2 (R-Car Gen3 only) */
720enum RIE2_BIT {
721 RIE2_QFS0 = 0x00000001,
722 RIE2_QFS1 = 0x00000002,
723 RIE2_QFS2 = 0x00000004,
724 RIE2_QFS3 = 0x00000008,
725 RIE2_QFS4 = 0x00000010,
726 RIE2_QFS5 = 0x00000020,
727 RIE2_QFS6 = 0x00000040,
728 RIE2_QFS7 = 0x00000080,
729 RIE2_QFS8 = 0x00000100,
730 RIE2_QFS9 = 0x00000200,
731 RIE2_QFS10 = 0x00000400,
732 RIE2_QFS11 = 0x00000800,
733 RIE2_QFS12 = 0x00001000,
734 RIE2_QFS13 = 0x00002000,
735 RIE2_QFS14 = 0x00004000,
736 RIE2_QFS15 = 0x00008000,
737 RIE2_QFS16 = 0x00010000,
738 RIE2_QFS17 = 0x00020000,
739 RIE2_RFFS = 0x80000000,
740};
741
742/* RID2 (R-Car Gen3 only) */
743enum RID2_BIT {
744 RID2_QFD0 = 0x00000001,
745 RID2_QFD1 = 0x00000002,
746 RID2_QFD2 = 0x00000004,
747 RID2_QFD3 = 0x00000008,
748 RID2_QFD4 = 0x00000010,
749 RID2_QFD5 = 0x00000020,
750 RID2_QFD6 = 0x00000040,
751 RID2_QFD7 = 0x00000080,
752 RID2_QFD8 = 0x00000100,
753 RID2_QFD9 = 0x00000200,
754 RID2_QFD10 = 0x00000400,
755 RID2_QFD11 = 0x00000800,
756 RID2_QFD12 = 0x00001000,
757 RID2_QFD13 = 0x00002000,
758 RID2_QFD14 = 0x00004000,
759 RID2_QFD15 = 0x00008000,
760 RID2_QFD16 = 0x00010000,
761 RID2_QFD17 = 0x00020000,
762 RID2_RFFD = 0x80000000,
763};
764
765/* TIE (R-Car Gen3 only) */
766enum TIE_BIT {
767 TIE_FTS0 = 0x00000001,
768 TIE_FTS1 = 0x00000002,
769 TIE_FTS2 = 0x00000004,
770 TIE_FTS3 = 0x00000008,
771 TIE_TFUS = 0x00000100,
772 TIE_TFWS = 0x00000200,
773 TIE_MFUS = 0x00000400,
774 TIE_MFWS = 0x00000800,
775 TIE_TDPS0 = 0x00010000,
776 TIE_TDPS1 = 0x00020000,
777 TIE_TDPS2 = 0x00040000,
778 TIE_TDPS3 = 0x00080000,
779};
780
781/* TID (R-Car Gen3 only) */
782enum TID_BIT {
783 TID_FTD0 = 0x00000001,
784 TID_FTD1 = 0x00000002,
785 TID_FTD2 = 0x00000004,
786 TID_FTD3 = 0x00000008,
787 TID_TFUD = 0x00000100,
788 TID_TFWD = 0x00000200,
789 TID_MFUD = 0x00000400,
790 TID_MFWD = 0x00000800,
791 TID_TDPD0 = 0x00010000,
792 TID_TDPD1 = 0x00020000,
793 TID_TDPD2 = 0x00040000,
794 TID_TDPD3 = 0x00080000,
795};
796
595/* ECMR */ 797/* ECMR */
596enum ECMR_BIT { 798enum ECMR_BIT {
597 ECMR_PRM = 0x00000001, 799 ECMR_PRM = 0x00000001,
@@ -817,6 +1019,8 @@ struct ravb_private {
817 int duplex; 1019 int duplex;
818 int emac_irq; 1020 int emac_irq;
819 enum ravb_chip_id chip_id; 1021 enum ravb_chip_id chip_id;
1022 int rx_irqs[NUM_RX_QUEUE];
1023 int tx_irqs[NUM_TX_QUEUE];
820 1024
821 unsigned no_avb_link:1; 1025 unsigned no_avb_link:1;
822 unsigned avb_link_active_low:1; 1026 unsigned avb_link_active_low:1;
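The new R-Car Gen3 registers come in enable/disable pairs (GIE/GID, RIE0/RID0, RIE2/RID2, TIE/TID): the code later in this patch writes a bit to the *E register to unmask that source and the same bit to the matching *D register to mask it, so no read-modify-write of a shared mask is needed, while the Gen2 path keeps using ravb_modify() on RIC0/TIC/GIC. A minimal model of the two styles, with the device mask simulated as a plain variable:

#include <stdio.h>
#include <stdint.h>

/* Simulated device state: the effective interrupt-enable mask. */
static uint32_t irq_mask;

/* Gen2 style: one mask register, updated by read-modify-write. */
static void modify_mask(uint32_t clear, uint32_t set)
{
	irq_mask = (irq_mask & ~clear) | set;
}

/* Gen3 style: separate write-1-to-set and write-1-to-clear registers. */
static void write_enable(uint32_t bits)  { irq_mask |= bits; }
static void write_disable(uint32_t bits) { irq_mask &= ~bits; }

int main(void)
{
	/* Unmask queues 0 and 1, then mask queue 0, Gen2 style. */
	modify_mask(0, 0x3);
	modify_mask(0x1, 0);
	printf("gen2 mask: 0x%08x\n", irq_mask);

	irq_mask = 0;

	/* Same sequence, Gen3 style: single writes, no read needed. */
	write_enable(0x3);
	write_disable(0x1);
	printf("gen3 mask: 0x%08x\n", irq_mask);
	return 0;
}

The practical benefit is that a queue interrupt can be masked or unmasked with one register write from any context, without having to read the current mask first.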
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 4e1a7dba7c4a..4b71951e185d 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -42,6 +42,16 @@
42 NETIF_MSG_RX_ERR | \ 42 NETIF_MSG_RX_ERR | \
43 NETIF_MSG_TX_ERR) 43 NETIF_MSG_TX_ERR)
44 44
45static const char *ravb_rx_irqs[NUM_RX_QUEUE] = {
46 "ch0", /* RAVB_BE */
47 "ch1", /* RAVB_NC */
48};
49
50static const char *ravb_tx_irqs[NUM_TX_QUEUE] = {
51 "ch18", /* RAVB_BE */
52 "ch19", /* RAVB_NC */
53};
54
45void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear, 55void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear,
46 u32 set) 56 u32 set)
47{ 57{
@@ -365,6 +375,7 @@ static void ravb_emac_init(struct net_device *ndev)
365/* Device init function for Ethernet AVB */ 375/* Device init function for Ethernet AVB */
366static int ravb_dmac_init(struct net_device *ndev) 376static int ravb_dmac_init(struct net_device *ndev)
367{ 377{
378 struct ravb_private *priv = netdev_priv(ndev);
368 int error; 379 int error;
369 380
370 /* Set CONFIG mode */ 381 /* Set CONFIG mode */
@@ -401,6 +412,12 @@ static int ravb_dmac_init(struct net_device *ndev)
401 ravb_write(ndev, TCCR_TFEN, TCCR); 412 ravb_write(ndev, TCCR_TFEN, TCCR);
402 413
403 /* Interrupt init: */ 414 /* Interrupt init: */
415 if (priv->chip_id == RCAR_GEN3) {
416 /* Clear DIL.DPLx */
417 ravb_write(ndev, 0, DIL);
418 /* Set queue specific interrupt */
419 ravb_write(ndev, CIE_CRIE | CIE_CTIE | CIE_CL0M, CIE);
420 }
404 /* Frame receive */ 421 /* Frame receive */
405 ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0); 422 ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
406 /* Disable FIFO full warning */ 423 /* Disable FIFO full warning */
@@ -643,7 +660,7 @@ static int ravb_stop_dma(struct net_device *ndev)
643} 660}
644 661
645/* E-MAC interrupt handler */ 662/* E-MAC interrupt handler */
646static void ravb_emac_interrupt(struct net_device *ndev) 663static void ravb_emac_interrupt_unlocked(struct net_device *ndev)
647{ 664{
648 struct ravb_private *priv = netdev_priv(ndev); 665 struct ravb_private *priv = netdev_priv(ndev);
649 u32 ecsr, psr; 666 u32 ecsr, psr;
@@ -669,6 +686,18 @@ static void ravb_emac_interrupt(struct net_device *ndev)
669 } 686 }
670} 687}
671 688
689static irqreturn_t ravb_emac_interrupt(int irq, void *dev_id)
690{
691 struct net_device *ndev = dev_id;
692 struct ravb_private *priv = netdev_priv(ndev);
693
694 spin_lock(&priv->lock);
695 ravb_emac_interrupt_unlocked(ndev);
696 mmiowb();
697 spin_unlock(&priv->lock);
698 return IRQ_HANDLED;
699}
700
672/* Error interrupt handler */ 701/* Error interrupt handler */
673static void ravb_error_interrupt(struct net_device *ndev) 702static void ravb_error_interrupt(struct net_device *ndev)
674{ 703{
@@ -695,6 +724,50 @@ static void ravb_error_interrupt(struct net_device *ndev)
695 } 724 }
696} 725}
697 726
727static bool ravb_queue_interrupt(struct net_device *ndev, int q)
728{
729 struct ravb_private *priv = netdev_priv(ndev);
730 u32 ris0 = ravb_read(ndev, RIS0);
731 u32 ric0 = ravb_read(ndev, RIC0);
732 u32 tis = ravb_read(ndev, TIS);
733 u32 tic = ravb_read(ndev, TIC);
734
735 if (((ris0 & ric0) & BIT(q)) || ((tis & tic) & BIT(q))) {
736 if (napi_schedule_prep(&priv->napi[q])) {
737 /* Mask RX and TX interrupts */
738 if (priv->chip_id == RCAR_GEN2) {
739 ravb_write(ndev, ric0 & ~BIT(q), RIC0);
740 ravb_write(ndev, tic & ~BIT(q), TIC);
741 } else {
742 ravb_write(ndev, BIT(q), RID0);
743 ravb_write(ndev, BIT(q), TID);
744 }
745 __napi_schedule(&priv->napi[q]);
746 } else {
747 netdev_warn(ndev,
748 "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n",
749 ris0, ric0);
750 netdev_warn(ndev,
751 " tx status 0x%08x, tx mask 0x%08x.\n",
752 tis, tic);
753 }
754 return true;
755 }
756 return false;
757}
758
759static bool ravb_timestamp_interrupt(struct net_device *ndev)
760{
761 u32 tis = ravb_read(ndev, TIS);
762
763 if (tis & TIS_TFUF) {
764 ravb_write(ndev, ~TIS_TFUF, TIS);
765 ravb_get_tx_tstamp(ndev);
766 return true;
767 }
768 return false;
769}
770
698static irqreturn_t ravb_interrupt(int irq, void *dev_id) 771static irqreturn_t ravb_interrupt(int irq, void *dev_id)
699{ 772{
700 struct net_device *ndev = dev_id; 773 struct net_device *ndev = dev_id;
@@ -708,46 +781,22 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id)
708 781
709 /* Received and transmitted interrupts */ 782 /* Received and transmitted interrupts */
710 if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) { 783 if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) {
711 u32 ris0 = ravb_read(ndev, RIS0);
712 u32 ric0 = ravb_read(ndev, RIC0);
713 u32 tis = ravb_read(ndev, TIS);
714 u32 tic = ravb_read(ndev, TIC);
715 int q; 784 int q;
716 785
717 /* Timestamp updated */ 786 /* Timestamp updated */
718 if (tis & TIS_TFUF) { 787 if (ravb_timestamp_interrupt(ndev))
719 ravb_write(ndev, ~TIS_TFUF, TIS);
720 ravb_get_tx_tstamp(ndev);
721 result = IRQ_HANDLED; 788 result = IRQ_HANDLED;
722 }
723 789
724 /* Network control and best effort queue RX/TX */ 790 /* Network control and best effort queue RX/TX */
725 for (q = RAVB_NC; q >= RAVB_BE; q--) { 791 for (q = RAVB_NC; q >= RAVB_BE; q--) {
726 if (((ris0 & ric0) & BIT(q)) || 792 if (ravb_queue_interrupt(ndev, q))
727 ((tis & tic) & BIT(q))) {
728 if (napi_schedule_prep(&priv->napi[q])) {
729 /* Mask RX and TX interrupts */
730 ric0 &= ~BIT(q);
731 tic &= ~BIT(q);
732 ravb_write(ndev, ric0, RIC0);
733 ravb_write(ndev, tic, TIC);
734 __napi_schedule(&priv->napi[q]);
735 } else {
736 netdev_warn(ndev,
737 "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n",
738 ris0, ric0);
739 netdev_warn(ndev,
740 " tx status 0x%08x, tx mask 0x%08x.\n",
741 tis, tic);
742 }
743 result = IRQ_HANDLED; 793 result = IRQ_HANDLED;
744 }
745 } 794 }
746 } 795 }
747 796
748 /* E-MAC status summary */ 797 /* E-MAC status summary */
749 if (iss & ISS_MS) { 798 if (iss & ISS_MS) {
750 ravb_emac_interrupt(ndev); 799 ravb_emac_interrupt_unlocked(ndev);
751 result = IRQ_HANDLED; 800 result = IRQ_HANDLED;
752 } 801 }
753 802
@@ -757,6 +806,7 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id)
757 result = IRQ_HANDLED; 806 result = IRQ_HANDLED;
758 } 807 }
759 808
809 /* gPTP interrupt status summary */
760 if ((iss & ISS_CGIS) && ravb_ptp_interrupt(ndev) == IRQ_HANDLED) 810 if ((iss & ISS_CGIS) && ravb_ptp_interrupt(ndev) == IRQ_HANDLED)
761 result = IRQ_HANDLED; 811 result = IRQ_HANDLED;
762 812
@@ -765,6 +815,64 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id)
765 return result; 815 return result;
766} 816}
767 817
818/* Timestamp/Error/gPTP interrupt handler */
819static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id)
820{
821 struct net_device *ndev = dev_id;
822 struct ravb_private *priv = netdev_priv(ndev);
823 irqreturn_t result = IRQ_NONE;
824 u32 iss;
825
826 spin_lock(&priv->lock);
827 /* Get interrupt status */
828 iss = ravb_read(ndev, ISS);
829
830 /* Timestamp updated */
831 if ((iss & ISS_TFUS) && ravb_timestamp_interrupt(ndev))
832 result = IRQ_HANDLED;
833
834 /* Error status summary */
835 if (iss & ISS_ES) {
836 ravb_error_interrupt(ndev);
837 result = IRQ_HANDLED;
838 }
839
840 /* gPTP interrupt status summary */
841 if ((iss & ISS_CGIS) && ravb_ptp_interrupt(ndev) == IRQ_HANDLED)
842 result = IRQ_HANDLED;
843
844 mmiowb();
845 spin_unlock(&priv->lock);
846 return result;
847}
848
849static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
850{
851 struct net_device *ndev = dev_id;
852 struct ravb_private *priv = netdev_priv(ndev);
853 irqreturn_t result = IRQ_NONE;
854
855 spin_lock(&priv->lock);
856
857 /* Network control/Best effort queue RX/TX */
858 if (ravb_queue_interrupt(ndev, q))
859 result = IRQ_HANDLED;
860
861 mmiowb();
862 spin_unlock(&priv->lock);
863 return result;
864}
865
866static irqreturn_t ravb_be_interrupt(int irq, void *dev_id)
867{
868 return ravb_dma_interrupt(irq, dev_id, RAVB_BE);
869}
870
871static irqreturn_t ravb_nc_interrupt(int irq, void *dev_id)
872{
873 return ravb_dma_interrupt(irq, dev_id, RAVB_NC);
874}
875
768static int ravb_poll(struct napi_struct *napi, int budget) 876static int ravb_poll(struct napi_struct *napi, int budget)
769{ 877{
770 struct net_device *ndev = napi->dev; 878 struct net_device *ndev = napi->dev;
@@ -804,8 +912,13 @@ static int ravb_poll(struct napi_struct *napi, int budget)
804 912
805 /* Re-enable RX/TX interrupts */ 913 /* Re-enable RX/TX interrupts */
806 spin_lock_irqsave(&priv->lock, flags); 914 spin_lock_irqsave(&priv->lock, flags);
807 ravb_modify(ndev, RIC0, mask, mask); 915 if (priv->chip_id == RCAR_GEN2) {
808 ravb_modify(ndev, TIC, mask, mask); 916 ravb_modify(ndev, RIC0, mask, mask);
917 ravb_modify(ndev, TIC, mask, mask);
918 } else {
919 ravb_write(ndev, mask, RIE0);
920 ravb_write(ndev, mask, TIE);
921 }
809 mmiowb(); 922 mmiowb();
810 spin_unlock_irqrestore(&priv->lock, flags); 923 spin_unlock_irqrestore(&priv->lock, flags);
811 924
@@ -1208,35 +1321,72 @@ static const struct ethtool_ops ravb_ethtool_ops = {
1208 .get_ts_info = ravb_get_ts_info, 1321 .get_ts_info = ravb_get_ts_info,
1209}; 1322};
1210 1323
1324static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler,
1325 struct net_device *ndev, struct device *dev,
1326 const char *ch)
1327{
1328 char *name;
1329 int error;
1330
1331 name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch);
1332 if (!name)
1333 return -ENOMEM;
1334 error = request_irq(irq, handler, 0, name, ndev);
1335 if (error)
1336 netdev_err(ndev, "cannot request IRQ %s\n", name);
1337
1338 return error;
1339}
1340
1211/* Network device open function for Ethernet AVB */ 1341/* Network device open function for Ethernet AVB */
1212static int ravb_open(struct net_device *ndev) 1342static int ravb_open(struct net_device *ndev)
1213{ 1343{
1214 struct ravb_private *priv = netdev_priv(ndev); 1344 struct ravb_private *priv = netdev_priv(ndev);
1345 struct platform_device *pdev = priv->pdev;
1346 struct device *dev = &pdev->dev;
1215 int error; 1347 int error;
1216 1348
1217 napi_enable(&priv->napi[RAVB_BE]); 1349 napi_enable(&priv->napi[RAVB_BE]);
1218 napi_enable(&priv->napi[RAVB_NC]); 1350 napi_enable(&priv->napi[RAVB_NC]);
1219 1351
1220 error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED, ndev->name, 1352 if (priv->chip_id == RCAR_GEN2) {
1221 ndev); 1353 error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED,
1222 if (error) { 1354 ndev->name, ndev);
1223 netdev_err(ndev, "cannot request IRQ\n");
1224 goto out_napi_off;
1225 }
1226
1227 if (priv->chip_id == RCAR_GEN3) {
1228 error = request_irq(priv->emac_irq, ravb_interrupt,
1229 IRQF_SHARED, ndev->name, ndev);
1230 if (error) { 1355 if (error) {
1231 netdev_err(ndev, "cannot request IRQ\n"); 1356 netdev_err(ndev, "cannot request IRQ\n");
1232 goto out_free_irq; 1357 goto out_napi_off;
1233 } 1358 }
1359 } else {
1360 error = ravb_hook_irq(ndev->irq, ravb_multi_interrupt, ndev,
1361 dev, "ch22:multi");
1362 if (error)
1363 goto out_napi_off;
1364 error = ravb_hook_irq(priv->emac_irq, ravb_emac_interrupt, ndev,
1365 dev, "ch24:emac");
1366 if (error)
1367 goto out_free_irq;
1368 error = ravb_hook_irq(priv->rx_irqs[RAVB_BE], ravb_be_interrupt,
1369 ndev, dev, "ch0:rx_be");
1370 if (error)
1371 goto out_free_irq_emac;
1372 error = ravb_hook_irq(priv->tx_irqs[RAVB_BE], ravb_be_interrupt,
1373 ndev, dev, "ch18:tx_be");
1374 if (error)
1375 goto out_free_irq_be_rx;
1376 error = ravb_hook_irq(priv->rx_irqs[RAVB_NC], ravb_nc_interrupt,
1377 ndev, dev, "ch1:rx_nc");
1378 if (error)
1379 goto out_free_irq_be_tx;
1380 error = ravb_hook_irq(priv->tx_irqs[RAVB_NC], ravb_nc_interrupt,
1381 ndev, dev, "ch19:tx_nc");
1382 if (error)
1383 goto out_free_irq_nc_rx;
1234 } 1384 }
1235 1385
1236 /* Device init */ 1386 /* Device init */
1237 error = ravb_dmac_init(ndev); 1387 error = ravb_dmac_init(ndev);
1238 if (error) 1388 if (error)
1239 goto out_free_irq2; 1389 goto out_free_irq_nc_tx;
1240 ravb_emac_init(ndev); 1390 ravb_emac_init(ndev);
1241 1391
1242 /* Initialise PTP Clock driver */ 1392 /* Initialise PTP Clock driver */
@@ -1256,9 +1406,18 @@ out_ptp_stop:
1256 /* Stop PTP Clock driver */ 1406 /* Stop PTP Clock driver */
1257 if (priv->chip_id == RCAR_GEN2) 1407 if (priv->chip_id == RCAR_GEN2)
1258 ravb_ptp_stop(ndev); 1408 ravb_ptp_stop(ndev);
1259out_free_irq2: 1409out_free_irq_nc_tx:
1260 if (priv->chip_id == RCAR_GEN3) 1410 if (priv->chip_id == RCAR_GEN2)
1261 free_irq(priv->emac_irq, ndev); 1411 goto out_free_irq;
1412 free_irq(priv->tx_irqs[RAVB_NC], ndev);
1413out_free_irq_nc_rx:
1414 free_irq(priv->rx_irqs[RAVB_NC], ndev);
1415out_free_irq_be_tx:
1416 free_irq(priv->tx_irqs[RAVB_BE], ndev);
1417out_free_irq_be_rx:
1418 free_irq(priv->rx_irqs[RAVB_BE], ndev);
1419out_free_irq_emac:
1420 free_irq(priv->emac_irq, ndev);
1262out_free_irq: 1421out_free_irq:
1263 free_irq(ndev->irq, ndev); 1422 free_irq(ndev->irq, ndev);
1264out_napi_off: 1423out_napi_off:
@@ -1377,11 +1536,11 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1377 1536
1378 /* TAG and timestamp required flag */ 1537 /* TAG and timestamp required flag */
1379 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 1538 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1380 skb_tx_timestamp(skb);
1381 desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR; 1539 desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
1382 desc->ds_tagl |= le16_to_cpu(ts_skb->tag << 12); 1540 desc->ds_tagl |= le16_to_cpu(ts_skb->tag << 12);
1383 } 1541 }
1384 1542
1543 skb_tx_timestamp(skb);
1385 /* Descriptor type must be set after all the above writes */ 1544 /* Descriptor type must be set after all the above writes */
1386 dma_wmb(); 1545 dma_wmb();
1387 desc->die_dt = DT_FEND; 1546 desc->die_dt = DT_FEND;
@@ -1713,6 +1872,7 @@ static int ravb_probe(struct platform_device *pdev)
1713 struct net_device *ndev; 1872 struct net_device *ndev;
1714 int error, irq, q; 1873 int error, irq, q;
1715 struct resource *res; 1874 struct resource *res;
1875 int i;
1716 1876
1717 if (!np) { 1877 if (!np) {
1718 dev_err(&pdev->dev, 1878 dev_err(&pdev->dev,
@@ -1782,6 +1942,22 @@ static int ravb_probe(struct platform_device *pdev)
1782 goto out_release; 1942 goto out_release;
1783 } 1943 }
1784 priv->emac_irq = irq; 1944 priv->emac_irq = irq;
1945 for (i = 0; i < NUM_RX_QUEUE; i++) {
1946 irq = platform_get_irq_byname(pdev, ravb_rx_irqs[i]);
1947 if (irq < 0) {
1948 error = irq;
1949 goto out_release;
1950 }
1951 priv->rx_irqs[i] = irq;
1952 }
1953 for (i = 0; i < NUM_TX_QUEUE; i++) {
1954 irq = platform_get_irq_byname(pdev, ravb_tx_irqs[i]);
1955 if (irq < 0) {
1956 error = irq;
1957 goto out_release;
1958 }
1959 priv->tx_irqs[i] = irq;
1960 }
1785 } 1961 }
1786 1962
1787 priv->chip_id = chip_id; 1963 priv->chip_id = chip_id;
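On Gen3 the driver now requests six separate interrupts (multi, emac, and BE/NC rx and tx), each with a readable name built by devm_kasprintf(), and the error path unwinds them in reverse order through a chain of goto labels; Gen2 keeps the single shared handler. The sketch below mimics that acquire-in-order / release-in-reverse structure with plain functions standing in for request_irq()/free_irq(), and the channel names copied from the hunk above purely as labels.

#include <stdio.h>

/* Stand-ins for request_irq()/free_irq(): index 3 is made to fail. */
static int request_line(int idx, const char *name)
{
	if (idx == 3) {
		printf("request %s failed\n", name);
		return -1;
	}
	printf("requested %s\n", name);
	return 0;
}

static void free_line(const char *name)
{
	printf("freed %s\n", name);
}

static int open_device(void)
{
	static const char *names[] = {
		"ch22:multi", "ch24:emac", "ch0:rx_be",
		"ch18:tx_be", "ch1:rx_nc", "ch19:tx_nc",
	};
	int i, err = 0;

	for (i = 0; i < 6; i++) {
		err = request_line(i, names[i]);
		if (err)
			goto unwind;	/* release what we already hold */
	}
	return 0;

unwind:
	while (--i >= 0)	/* reverse order, same as the goto chain */
		free_line(names[i]);
	return err;
}

int main(void)
{
	return open_device() ? 1 : 0;
}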
diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c
index 57992ccc4657..f1b2cbb336e8 100644
--- a/drivers/net/ethernet/renesas/ravb_ptp.c
+++ b/drivers/net/ethernet/renesas/ravb_ptp.c
@@ -194,7 +194,12 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp,
194 priv->ptp.extts[req->index] = on; 194 priv->ptp.extts[req->index] = on;
195 195
196 spin_lock_irqsave(&priv->lock, flags); 196 spin_lock_irqsave(&priv->lock, flags);
197 ravb_modify(ndev, GIC, GIC_PTCE, on ? GIC_PTCE : 0); 197 if (priv->chip_id == RCAR_GEN2)
198 ravb_modify(ndev, GIC, GIC_PTCE, on ? GIC_PTCE : 0);
199 else if (on)
200 ravb_write(ndev, GIE_PTCS, GIE);
201 else
202 ravb_write(ndev, GID_PTCD, GID);
198 mmiowb(); 203 mmiowb();
199 spin_unlock_irqrestore(&priv->lock, flags); 204 spin_unlock_irqrestore(&priv->lock, flags);
200 205
@@ -241,7 +246,10 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp,
241 error = ravb_ptp_update_compare(priv, (u32)start_ns); 246 error = ravb_ptp_update_compare(priv, (u32)start_ns);
242 if (!error) { 247 if (!error) {
243 /* Unmask interrupt */ 248 /* Unmask interrupt */
244 ravb_modify(ndev, GIC, GIC_PTME, GIC_PTME); 249 if (priv->chip_id == RCAR_GEN2)
250 ravb_modify(ndev, GIC, GIC_PTME, GIC_PTME);
251 else
252 ravb_write(ndev, GIE_PTMS0, GIE);
245 } 253 }
246 } else { 254 } else {
247 spin_lock_irqsave(&priv->lock, flags); 255 spin_lock_irqsave(&priv->lock, flags);
@@ -250,7 +258,10 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp,
250 perout->period = 0; 258 perout->period = 0;
251 259
252 /* Mask interrupt */ 260 /* Mask interrupt */
253 ravb_modify(ndev, GIC, GIC_PTME, 0); 261 if (priv->chip_id == RCAR_GEN2)
262 ravb_modify(ndev, GIC, GIC_PTME, 0);
263 else
264 ravb_write(ndev, GID_PTMD0, GID);
254 } 265 }
255 mmiowb(); 266 mmiowb();
256 spin_unlock_irqrestore(&priv->lock, flags); 267 spin_unlock_irqrestore(&priv->lock, flags);
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
index b02eed12bfc5..73427e29df2a 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -155,11 +155,11 @@ static int sxgbe_platform_probe(struct platform_device *pdev)
155 return 0; 155 return 0;
156 156
157err_rx_irq_unmap: 157err_rx_irq_unmap:
158 while (--i) 158 while (i--)
159 irq_dispose_mapping(priv->rxq[i]->irq_no); 159 irq_dispose_mapping(priv->rxq[i]->irq_no);
160 i = SXGBE_TX_QUEUES; 160 i = SXGBE_TX_QUEUES;
161err_tx_irq_unmap: 161err_tx_irq_unmap:
162 while (--i) 162 while (i--)
163 irq_dispose_mapping(priv->txq[i]->irq_no); 163 irq_dispose_mapping(priv->txq[i]->irq_no);
164 irq_dispose_mapping(priv->irq); 164 irq_dispose_mapping(priv->irq);
165err_drv_remove: 165err_drv_remove:
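The sxgbe fix is a classic unwind-loop off-by-one: `while (--i)` decrements before the test, so it never disposes index 0 and, if the failure happened with i == 0, the very first decrement drops the counter below zero and the loop runs wild; `while (i--)` tests first and then visits i-1 down to 0 exactly once each. A compact demonstration, with array indices standing in for the IRQ mappings:

#include <stdio.h>

#define NQUEUES 4

static void dispose(int idx)
{
	printf("dispose %d\n", idx);
}

int main(void)
{
	int i;

	/* Suppose setup failed after mapping queues 0..3 (i == NQUEUES). */

	i = NQUEUES;
	printf("buggy  while (--i):");
	while (--i)
		printf(" %d", i);	/* 3 2 1 -- index 0 is leaked */
	printf("\n");

	i = NQUEUES;
	printf("fixed  while (i--):\n");
	while (i--)
		dispose(i);		/* 3 2 1 0 -- every mapping freed */
	return 0;
}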
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 3f5711061432..a733868a43aa 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -1791,9 +1791,11 @@ static int smc911x_probe(struct net_device *dev)
1791 unsigned int val, chip_id, revision; 1791 unsigned int val, chip_id, revision;
1792 const char *version_string; 1792 const char *version_string;
1793 unsigned long irq_flags; 1793 unsigned long irq_flags;
1794#ifdef SMC_USE_DMA
1794 struct dma_slave_config config; 1795 struct dma_slave_config config;
1795 dma_cap_mask_t mask; 1796 dma_cap_mask_t mask;
1796 struct pxad_param param; 1797 struct pxad_param param;
1798#endif
1797 1799
1798 DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); 1800 DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
1799 1801
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index b3901616f4f6..0fb362d5a722 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -2,7 +2,8 @@ obj-$(CONFIG_STMMAC_ETH) += stmmac.o
2stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \ 2stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
3 chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \ 3 chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
4 dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \ 4 dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
5 mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o $(stmmac-y) 5 mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o dwmac4_descs.o \
6 dwmac4_dma.o dwmac4_lib.o dwmac4_core.o $(stmmac-y)
6 7
7# Ordering matters. Generic driver must be last. 8# Ordering matters. Generic driver must be last.
8obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o 9obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index f96d257308b0..fc60368df2e7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -41,6 +41,8 @@
41/* Synopsys Core versions */ 41/* Synopsys Core versions */
42#define DWMAC_CORE_3_40 0x34 42#define DWMAC_CORE_3_40 0x34
43#define DWMAC_CORE_3_50 0x35 43#define DWMAC_CORE_3_50 0x35
44#define DWMAC_CORE_4_00 0x40
45#define STMMAC_CHAN0 0 /* Always supported and default for all chips */
44 46
45#define DMA_TX_SIZE 512 47#define DMA_TX_SIZE 512
46#define DMA_RX_SIZE 512 48#define DMA_RX_SIZE 512
@@ -167,6 +169,9 @@ struct stmmac_extra_stats {
167 unsigned long mtl_rx_fifo_ctrl_active; 169 unsigned long mtl_rx_fifo_ctrl_active;
168 unsigned long mac_rx_frame_ctrl_fifo; 170 unsigned long mac_rx_frame_ctrl_fifo;
169 unsigned long mac_gmii_rx_proto_engine; 171 unsigned long mac_gmii_rx_proto_engine;
172 /* TSO */
173 unsigned long tx_tso_frames;
174 unsigned long tx_tso_nfrags;
170}; 175};
171 176
172/* CSR Frequency Access Defines*/ 177/* CSR Frequency Access Defines*/
@@ -243,6 +248,7 @@ enum rx_frame_status {
243 csum_none = 0x2, 248 csum_none = 0x2,
244 llc_snap = 0x4, 249 llc_snap = 0x4,
245 dma_own = 0x8, 250 dma_own = 0x8,
251 rx_not_ls = 0x10,
246}; 252};
247 253
248/* Tx status */ 254/* Tx status */
@@ -269,6 +275,7 @@ enum dma_irq_status {
269#define CORE_PCS_ANE_COMPLETE (1 << 5) 275#define CORE_PCS_ANE_COMPLETE (1 << 5)
270#define CORE_PCS_LINK_STATUS (1 << 6) 276#define CORE_PCS_LINK_STATUS (1 << 6)
271#define CORE_RGMII_IRQ (1 << 7) 277#define CORE_RGMII_IRQ (1 << 7)
278#define CORE_IRQ_MTL_RX_OVERFLOW BIT(8)
272 279
273/* Physical Coding Sublayer */ 280/* Physical Coding Sublayer */
274struct rgmii_adv { 281struct rgmii_adv {
@@ -300,8 +307,10 @@ struct dma_features {
300 /* 802.3az - Energy-Efficient Ethernet (EEE) */ 307 /* 802.3az - Energy-Efficient Ethernet (EEE) */
301 unsigned int eee; 308 unsigned int eee;
302 unsigned int av; 309 unsigned int av;
310 unsigned int tsoen;
303 /* TX and RX csum */ 311 /* TX and RX csum */
304 unsigned int tx_coe; 312 unsigned int tx_coe;
313 unsigned int rx_coe;
305 unsigned int rx_coe_type1; 314 unsigned int rx_coe_type1;
306 unsigned int rx_coe_type2; 315 unsigned int rx_coe_type2;
307 unsigned int rxfifo_over_2048; 316 unsigned int rxfifo_over_2048;
@@ -348,6 +357,10 @@ struct stmmac_desc_ops {
348 void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len, 357 void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len,
349 bool csum_flag, int mode, bool tx_own, 358 bool csum_flag, int mode, bool tx_own,
350 bool ls); 359 bool ls);
360 void (*prepare_tso_tx_desc)(struct dma_desc *p, int is_fs, int len1,
361 int len2, bool tx_own, bool ls,
362 unsigned int tcphdrlen,
363 unsigned int tcppayloadlen);
351 /* Set/get the owner of the descriptor */ 364 /* Set/get the owner of the descriptor */
352 void (*set_tx_owner) (struct dma_desc *p); 365 void (*set_tx_owner) (struct dma_desc *p);
353 int (*get_tx_owner) (struct dma_desc *p); 366 int (*get_tx_owner) (struct dma_desc *p);
@@ -380,6 +393,10 @@ struct stmmac_desc_ops {
380 u64(*get_timestamp) (void *desc, u32 ats); 393 u64(*get_timestamp) (void *desc, u32 ats);
381 /* get rx timestamp status */ 394 /* get rx timestamp status */
382 int (*get_rx_timestamp_status) (void *desc, u32 ats); 395 int (*get_rx_timestamp_status) (void *desc, u32 ats);
396 /* Display ring */
397 void (*display_ring)(void *head, unsigned int size, bool rx);
398 /* set MSS via context descriptor */
399 void (*set_mss)(struct dma_desc *p, unsigned int mss);
383}; 400};
384 401
385extern const struct stmmac_desc_ops enh_desc_ops; 402extern const struct stmmac_desc_ops enh_desc_ops;
@@ -412,9 +429,15 @@ struct stmmac_dma_ops {
412 int (*dma_interrupt) (void __iomem *ioaddr, 429 int (*dma_interrupt) (void __iomem *ioaddr,
413 struct stmmac_extra_stats *x); 430 struct stmmac_extra_stats *x);
414 /* If supported then get the optional core features */ 431 /* If supported then get the optional core features */
415 unsigned int (*get_hw_feature) (void __iomem *ioaddr); 432 void (*get_hw_feature)(void __iomem *ioaddr,
433 struct dma_features *dma_cap);
416 /* Program the HW RX Watchdog */ 434 /* Program the HW RX Watchdog */
417 void (*rx_watchdog) (void __iomem *ioaddr, u32 riwt); 435 void (*rx_watchdog) (void __iomem *ioaddr, u32 riwt);
436 void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len);
437 void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len);
438 void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
439 void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
440 void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan);
418}; 441};
419 442
420struct mac_device_info; 443struct mac_device_info;
@@ -463,6 +486,7 @@ struct stmmac_hwtimestamp {
463}; 486};
464 487
465extern const struct stmmac_hwtimestamp stmmac_ptp; 488extern const struct stmmac_hwtimestamp stmmac_ptp;
489extern const struct stmmac_mode_ops dwmac4_ring_mode_ops;
466 490
467struct mac_link { 491struct mac_link {
468 int port; 492 int port;
@@ -495,7 +519,6 @@ struct mac_device_info {
495 const struct stmmac_hwtimestamp *ptp; 519 const struct stmmac_hwtimestamp *ptp;
496 struct mii_regs mii; /* MII register Addresses */ 520 struct mii_regs mii; /* MII register Addresses */
497 struct mac_link link; 521 struct mac_link link;
498 unsigned int synopsys_uid;
499 void __iomem *pcsr; /* vpointer to device CSRs */ 522 void __iomem *pcsr; /* vpointer to device CSRs */
500 int multicast_filter_bins; 523 int multicast_filter_bins;
501 int unicast_filter_entries; 524 int unicast_filter_entries;
@@ -504,18 +527,47 @@ struct mac_device_info {
504}; 527};
505 528
506struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins, 529struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
507 int perfect_uc_entries); 530 int perfect_uc_entries,
508struct mac_device_info *dwmac100_setup(void __iomem *ioaddr); 531 int *synopsys_id);
532struct mac_device_info *dwmac100_setup(void __iomem *ioaddr, int *synopsys_id);
533struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins,
534 int perfect_uc_entries, int *synopsys_id);
509 535
510void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6], 536void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
511 unsigned int high, unsigned int low); 537 unsigned int high, unsigned int low);
512void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, 538void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
513 unsigned int high, unsigned int low); 539 unsigned int high, unsigned int low);
514
515void stmmac_set_mac(void __iomem *ioaddr, bool enable); 540void stmmac_set_mac(void __iomem *ioaddr, bool enable);
516 541
542void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
543 unsigned int high, unsigned int low);
544void stmmac_dwmac4_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
545 unsigned int high, unsigned int low);
546void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable);
547
517void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); 548void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
518extern const struct stmmac_mode_ops ring_mode_ops; 549extern const struct stmmac_mode_ops ring_mode_ops;
519extern const struct stmmac_mode_ops chain_mode_ops; 550extern const struct stmmac_mode_ops chain_mode_ops;
520 551extern const struct stmmac_desc_ops dwmac4_desc_ops;
552
553/**
554 * stmmac_get_synopsys_id - return the SYINID.
555 * @priv: driver private structure
556 * Description: this simple function is to decode and return the SYINID
557 * starting from the HW core register.
558 */
559static inline u32 stmmac_get_synopsys_id(u32 hwid)
560{
561 /* Check Synopsys Id (not available on old chips) */
562 if (likely(hwid)) {
563 u32 uid = ((hwid & 0x0000ff00) >> 8);
564 u32 synid = (hwid & 0x000000ff);
565
566 pr_info("stmmac - user ID: 0x%x, Synopsys ID: 0x%x\n",
567 uid, synid);
568
569 return synid;
570 }
571 return 0;
572}
521#endif /* __COMMON_H__ */ 573#endif /* __COMMON_H__ */
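stmmac_get_synopsys_id() above splits the GMAC_VERSION word into a vendor "user ID" in bits 15:8 and the Synopsys core ID in bits 7:0, the latter being the quantity the DWMAC_CORE_* constants earlier in this header describe. A standalone decode of a couple of sample register words (the sample values are invented, not read from hardware):

#include <stdio.h>
#include <stdint.h>

struct synopsys_id {
	uint32_t uid;	/* bits 15:8 - vendor/user ID */
	uint32_t synid;	/* bits 7:0  - Synopsys core version */
};

static struct synopsys_id decode_version(uint32_t hwid)
{
	struct synopsys_id id = {
		.uid   = (hwid & 0x0000ff00) >> 8,
		.synid = hwid & 0x000000ff,
	};
	return id;
}

int main(void)
{
	/* Example register contents, e.g. a 3.50 core and a 4.00 core. */
	uint32_t samples[] = { 0x00001035, 0x00002140 };

	for (int i = 0; i < 2; i++) {
		struct synopsys_id id = decode_version(samples[i]);

		printf("hwid 0x%08x -> user ID 0x%x, Synopsys ID 0x%x\n",
		       samples[i], id.uid, id.synid);
	}
	return 0;
}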
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index c2941172f6d1..fb1eb578e34e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -491,7 +491,8 @@ static const struct stmmac_ops dwmac1000_ops = {
491}; 491};
492 492
493struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins, 493struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
494 int perfect_uc_entries) 494 int perfect_uc_entries,
495 int *synopsys_id)
495{ 496{
496 struct mac_device_info *mac; 497 struct mac_device_info *mac;
497 u32 hwid = readl(ioaddr + GMAC_VERSION); 498 u32 hwid = readl(ioaddr + GMAC_VERSION);
@@ -516,7 +517,9 @@ struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
516 mac->link.speed = GMAC_CONTROL_FES; 517 mac->link.speed = GMAC_CONTROL_FES;
517 mac->mii.addr = GMAC_MII_ADDR; 518 mac->mii.addr = GMAC_MII_ADDR;
518 mac->mii.data = GMAC_MII_DATA; 519 mac->mii.data = GMAC_MII_DATA;
519 mac->synopsys_uid = hwid; 520
521 /* Get and dump the chip ID */
522 *synopsys_id = stmmac_get_synopsys_id(hwid);
520 523
521 return mac; 524 return mac;
522} 525}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index da32d6037e3e..990746955216 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -215,9 +215,40 @@ static void dwmac1000_dump_dma_regs(void __iomem *ioaddr)
215 } 215 }
216} 216}
217 217
218static unsigned int dwmac1000_get_hw_feature(void __iomem *ioaddr) 218static void dwmac1000_get_hw_feature(void __iomem *ioaddr,
219 struct dma_features *dma_cap)
219{ 220{
220 return readl(ioaddr + DMA_HW_FEATURE); 221 u32 hw_cap = readl(ioaddr + DMA_HW_FEATURE);
222
223 dma_cap->mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL);
224 dma_cap->mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1;
225 dma_cap->half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2;
226 dma_cap->hash_filter = (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4;
227 dma_cap->multi_addr = (hw_cap & DMA_HW_FEAT_ADDMAC) >> 5;
228 dma_cap->pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6;
229 dma_cap->sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8;
230 dma_cap->pmt_remote_wake_up = (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9;
231 dma_cap->pmt_magic_frame = (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10;
232 /* MMC */
233 dma_cap->rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11;
234 /* IEEE 1588-2002 */
235 dma_cap->time_stamp =
236 (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12;
237 /* IEEE 1588-2008 */
238 dma_cap->atime_stamp = (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13;
239 /* 802.3az - Energy-Efficient Ethernet (EEE) */
240 dma_cap->eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14;
241 dma_cap->av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15;
242 /* TX and RX csum */
243 dma_cap->tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16;
244 dma_cap->rx_coe_type1 = (hw_cap & DMA_HW_FEAT_RXTYP1COE) >> 17;
245 dma_cap->rx_coe_type2 = (hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18;
246 dma_cap->rxfifo_over_2048 = (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19;
247 /* TX and RX number of channels */
248 dma_cap->number_rx_channel = (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20;
249 dma_cap->number_tx_channel = (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22;
250 /* Alternate (enhanced) DESC mode */
251 dma_cap->enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
221} 252}
222 253
223static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt) 254static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt)
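get_hw_feature now fills the dma_features structure directly from the capability register instead of handing the raw word back to common code; each field is the register masked by its feature bit(s) and shifted down to bit 0. The sketch below decodes a few of those flags from a sample word using the same mask-and-shift shape; the shift amounts are taken from the hunk above, while the mask widths and the sample value are assumptions for illustration.

#include <stdio.h>
#include <stdint.h>

struct dma_features_sketch {
	unsigned int mbps_1000;
	unsigned int tx_coe;
	unsigned int number_rx_channel;
	unsigned int enh_desc;
};

static void get_hw_feature(uint32_t hw_cap, struct dma_features_sketch *cap)
{
	cap->mbps_1000         = (hw_cap & (1u << 1))    >> 1;	/* GMIISEL */
	cap->tx_coe            = (hw_cap & (1u << 16))   >> 16;	/* TXCOESEL */
	cap->number_rx_channel = (hw_cap & (0x3u << 20)) >> 20;	/* RXCHCNT */
	cap->enh_desc          = (hw_cap & (1u << 24))   >> 24;	/* ENHDESSEL */
}

int main(void)
{
	struct dma_features_sketch cap;
	uint32_t hw_cap = 0x01310002;	/* sample capability word */

	get_hw_feature(hw_cap, &cap);
	printf("1000 Mb/s: %u, TX csum: %u, RX channels field: %u, enh desc: %u\n",
	       cap.mbps_1000, cap.tx_coe, cap.number_rx_channel, cap.enh_desc);
	return 0;
}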
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index f8dd773f246c..6418b2e07619 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -173,7 +173,7 @@ static const struct stmmac_ops dwmac100_ops = {
173 .get_umac_addr = dwmac100_get_umac_addr, 173 .get_umac_addr = dwmac100_get_umac_addr,
174}; 174};
175 175
176struct mac_device_info *dwmac100_setup(void __iomem *ioaddr) 176struct mac_device_info *dwmac100_setup(void __iomem *ioaddr, int *synopsys_id)
177{ 177{
178 struct mac_device_info *mac; 178 struct mac_device_info *mac;
179 179
@@ -192,7 +192,8 @@ struct mac_device_info *dwmac100_setup(void __iomem *ioaddr)
192 mac->link.speed = 0; 192 mac->link.speed = 0;
193 mac->mii.addr = MAC_MII_ADDR; 193 mac->mii.addr = MAC_MII_ADDR;
194 mac->mii.data = MAC_MII_DATA; 194 mac->mii.data = MAC_MII_DATA;
195 mac->synopsys_uid = 0; 195 /* Synopsys Id is not available on old chips */
196 *synopsys_id = 0;
196 197
197 return mac; 198 return mac;
198} 199}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
new file mode 100644
index 000000000000..bc50952a18e7
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -0,0 +1,255 @@
1/*
2 * DWMAC4 Header file.
3 *
4 * Copyright (C) 2015 STMicroelectronics Ltd
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * Author: Alexandre Torgue <alexandre.torgue@st.com>
11 */
12
13#ifndef __DWMAC4_H__
14#define __DWMAC4_H__
15
16#include "common.h"
17
18/* MAC registers */
19#define GMAC_CONFIG 0x00000000
20#define GMAC_PACKET_FILTER 0x00000008
21#define GMAC_HASH_TAB_0_31 0x00000010
22#define GMAC_HASH_TAB_32_63 0x00000014
23#define GMAC_RX_FLOW_CTRL 0x00000090
24#define GMAC_QX_TX_FLOW_CTRL(x) (0x70 + x * 4)
25#define GMAC_INT_STATUS 0x000000b0
26#define GMAC_INT_EN 0x000000b4
27#define GMAC_AN_CTRL 0x000000e0
28#define GMAC_AN_STATUS 0x000000e4
29#define GMAC_AN_ADV 0x000000e8
30#define GMAC_AN_LPA 0x000000ec
31#define GMAC_PMT 0x000000c0
32#define GMAC_VERSION 0x00000110
33#define GMAC_DEBUG 0x00000114
34#define GMAC_HW_FEATURE0 0x0000011c
35#define GMAC_HW_FEATURE1 0x00000120
36#define GMAC_HW_FEATURE2 0x00000124
37#define GMAC_MDIO_ADDR 0x00000200
38#define GMAC_MDIO_DATA 0x00000204
39#define GMAC_ADDR_HIGH(reg) (0x300 + reg * 8)
40#define GMAC_ADDR_LOW(reg) (0x304 + reg * 8)
41
42/* MAC Packet Filtering */
43#define GMAC_PACKET_FILTER_PR BIT(0)
44#define GMAC_PACKET_FILTER_HMC BIT(2)
45#define GMAC_PACKET_FILTER_PM BIT(4)
46
47#define GMAC_MAX_PERFECT_ADDRESSES 128
48
49/* MAC Flow Control RX */
50#define GMAC_RX_FLOW_CTRL_RFE BIT(0)
51
52/* MAC Flow Control TX */
53#define GMAC_TX_FLOW_CTRL_TFE BIT(1)
54#define GMAC_TX_FLOW_CTRL_PT_SHIFT 16
55
56/* MAC Interrupt bitmap*/
57#define GMAC_INT_PMT_EN BIT(4)
58#define GMAC_INT_LPI_EN BIT(5)
59
60enum dwmac4_irq_status {
61 time_stamp_irq = 0x00001000,
62 mmc_rx_csum_offload_irq = 0x00000800,
63 mmc_tx_irq = 0x00000400,
64 mmc_rx_irq = 0x00000200,
65 mmc_irq = 0x00000100,
66 pmt_irq = 0x00000010,
67 pcs_ane_irq = 0x00000004,
68 pcs_link_irq = 0x00000002,
69};
70
71/* MAC Auto-Neg bitmap*/
72#define GMAC_AN_CTRL_RAN BIT(9)
73#define GMAC_AN_CTRL_ANE BIT(12)
74#define GMAC_AN_CTRL_ELE BIT(14)
75#define GMAC_AN_FD BIT(5)
76#define GMAC_AN_HD BIT(6)
77#define GMAC_AN_PSE_MASK GENMASK(8, 7)
78#define GMAC_AN_PSE_SHIFT 7
79
80/* MAC PMT bitmap */
81enum power_event {
82 pointer_reset = 0x80000000,
83 global_unicast = 0x00000200,
84 wake_up_rx_frame = 0x00000040,
85 magic_frame = 0x00000020,
86 wake_up_frame_en = 0x00000004,
87 magic_pkt_en = 0x00000002,
88 power_down = 0x00000001,
89};
90
91/* MAC Debug bitmap */
92#define GMAC_DEBUG_TFCSTS_MASK GENMASK(18, 17)
93#define GMAC_DEBUG_TFCSTS_SHIFT 17
94#define GMAC_DEBUG_TFCSTS_IDLE 0
95#define GMAC_DEBUG_TFCSTS_WAIT 1
96#define GMAC_DEBUG_TFCSTS_GEN_PAUSE 2
97#define GMAC_DEBUG_TFCSTS_XFER 3
98#define GMAC_DEBUG_TPESTS BIT(16)
99#define GMAC_DEBUG_RFCFCSTS_MASK GENMASK(2, 1)
100#define GMAC_DEBUG_RFCFCSTS_SHIFT 1
101#define GMAC_DEBUG_RPESTS BIT(0)
102
103/* MAC config */
104#define GMAC_CONFIG_IPC BIT(27)
105#define GMAC_CONFIG_2K BIT(22)
106#define GMAC_CONFIG_ACS BIT(20)
107#define GMAC_CONFIG_BE BIT(18)
108#define GMAC_CONFIG_JD BIT(17)
109#define GMAC_CONFIG_JE BIT(16)
110#define GMAC_CONFIG_PS BIT(15)
111#define GMAC_CONFIG_FES BIT(14)
112#define GMAC_CONFIG_DM BIT(13)
113#define GMAC_CONFIG_DCRS BIT(9)
114#define GMAC_CONFIG_TE BIT(1)
115#define GMAC_CONFIG_RE BIT(0)
116
117/* MAC HW features0 bitmap */
118#define GMAC_HW_FEAT_ADDMAC BIT(18)
119#define GMAC_HW_FEAT_RXCOESEL BIT(16)
120#define GMAC_HW_FEAT_TXCOSEL BIT(14)
121#define GMAC_HW_FEAT_EEESEL BIT(13)
122#define GMAC_HW_FEAT_TSSEL BIT(12)
123#define GMAC_HW_FEAT_MMCSEL BIT(8)
124#define GMAC_HW_FEAT_MGKSEL BIT(7)
125#define GMAC_HW_FEAT_RWKSEL BIT(6)
126#define GMAC_HW_FEAT_SMASEL BIT(5)
127#define GMAC_HW_FEAT_VLHASH BIT(4)
128#define GMAC_HW_FEAT_PCSSEL BIT(3)
129#define GMAC_HW_FEAT_HDSEL BIT(2)
130#define GMAC_HW_FEAT_GMIISEL BIT(1)
131#define GMAC_HW_FEAT_MIISEL BIT(0)
132
133/* MAC HW features1 bitmap */
134#define GMAC_HW_FEAT_AVSEL BIT(20)
135#define GMAC_HW_TSOEN BIT(18)
136
137/* MAC HW features2 bitmap */
138#define GMAC_HW_FEAT_TXCHCNT GENMASK(21, 18)
139#define GMAC_HW_FEAT_RXCHCNT GENMASK(15, 12)
140
141/* MAC HW ADDR regs */
142#define GMAC_HI_DCS GENMASK(18, 16)
143#define GMAC_HI_DCS_SHIFT 16
144#define GMAC_HI_REG_AE BIT(31)
145
146/* MTL registers */
147#define MTL_INT_STATUS 0x00000c20
148#define MTL_INT_Q0 BIT(0)
149
150#define MTL_CHAN_BASE_ADDR 0x00000d00
151#define MTL_CHAN_BASE_OFFSET 0x40
152#define MTL_CHANX_BASE_ADDR(x) (MTL_CHAN_BASE_ADDR + \
153 (x * MTL_CHAN_BASE_OFFSET))
154
155#define MTL_CHAN_TX_OP_MODE(x) MTL_CHANX_BASE_ADDR(x)
156#define MTL_CHAN_TX_DEBUG(x) (MTL_CHANX_BASE_ADDR(x) + 0x8)
157#define MTL_CHAN_INT_CTRL(x) (MTL_CHANX_BASE_ADDR(x) + 0x2c)
158#define MTL_CHAN_RX_OP_MODE(x) (MTL_CHANX_BASE_ADDR(x) + 0x30)
159#define MTL_CHAN_RX_DEBUG(x) (MTL_CHANX_BASE_ADDR(x) + 0x38)
160
161#define MTL_OP_MODE_RSF BIT(5)
162#define MTL_OP_MODE_TSF BIT(1)
163
164#define MTL_OP_MODE_TTC_MASK 0x70
165#define MTL_OP_MODE_TTC_SHIFT 4
166
167#define MTL_OP_MODE_TTC_32 0
168#define MTL_OP_MODE_TTC_64 (1 << MTL_OP_MODE_TTC_SHIFT)
169#define MTL_OP_MODE_TTC_96 (2 << MTL_OP_MODE_TTC_SHIFT)
170#define MTL_OP_MODE_TTC_128 (3 << MTL_OP_MODE_TTC_SHIFT)
171#define MTL_OP_MODE_TTC_192 (4 << MTL_OP_MODE_TTC_SHIFT)
172#define MTL_OP_MODE_TTC_256 (5 << MTL_OP_MODE_TTC_SHIFT)
173#define MTL_OP_MODE_TTC_384 (6 << MTL_OP_MODE_TTC_SHIFT)
174#define MTL_OP_MODE_TTC_512 (7 << MTL_OP_MODE_TTC_SHIFT)
175
176#define MTL_OP_MODE_RTC_MASK 0x18
177#define MTL_OP_MODE_RTC_SHIFT 3
178
179#define MTL_OP_MODE_RTC_32 (1 << MTL_OP_MODE_RTC_SHIFT)
180#define MTL_OP_MODE_RTC_64 0
181#define MTL_OP_MODE_RTC_96 (2 << MTL_OP_MODE_RTC_SHIFT)
182#define MTL_OP_MODE_RTC_128 (3 << MTL_OP_MODE_RTC_SHIFT)
183
184/* MTL debug */
185#define MTL_DEBUG_TXSTSFSTS BIT(5)
186#define MTL_DEBUG_TXFSTS BIT(4)
187#define MTL_DEBUG_TWCSTS BIT(3)
188
189/* MTL debug: Tx FIFO Read Controller Status */
190#define MTL_DEBUG_TRCSTS_MASK GENMASK(2, 1)
191#define MTL_DEBUG_TRCSTS_SHIFT 1
192#define MTL_DEBUG_TRCSTS_IDLE 0
193#define MTL_DEBUG_TRCSTS_READ 1
194#define MTL_DEBUG_TRCSTS_TXW 2
195#define MTL_DEBUG_TRCSTS_WRITE 3
196#define MTL_DEBUG_TXPAUSED BIT(0)
197
198/* MAC debug: GMII or MII Transmit Protocol Engine Status */
199#define MTL_DEBUG_RXFSTS_MASK GENMASK(5, 4)
200#define MTL_DEBUG_RXFSTS_SHIFT 4
201#define MTL_DEBUG_RXFSTS_EMPTY 0
202#define MTL_DEBUG_RXFSTS_BT 1
203#define MTL_DEBUG_RXFSTS_AT 2
204#define MTL_DEBUG_RXFSTS_FULL 3
205#define MTL_DEBUG_RRCSTS_MASK GENMASK(2, 1)
206#define MTL_DEBUG_RRCSTS_SHIFT 1
207#define MTL_DEBUG_RRCSTS_IDLE 0
208#define MTL_DEBUG_RRCSTS_RDATA 1
209#define MTL_DEBUG_RRCSTS_RSTAT 2
210#define MTL_DEBUG_RRCSTS_FLUSH 3
211#define MTL_DEBUG_RWCSTS BIT(0)
212
213/* MTL interrupt */
214#define MTL_RX_OVERFLOW_INT_EN BIT(24)
215#define MTL_RX_OVERFLOW_INT BIT(16)
216
217/* Default operating mode of the MAC */
218#define GMAC_CORE_INIT (GMAC_CONFIG_JD | GMAC_CONFIG_PS | GMAC_CONFIG_ACS | \
219 GMAC_CONFIG_BE | GMAC_CONFIG_DCRS)
220
221/* To dump the core regs excluding the Address Registers */
222#define GMAC_REG_NUM 132
223
224/* MTL debug */
225#define MTL_DEBUG_TXSTSFSTS BIT(5)
226#define MTL_DEBUG_TXFSTS BIT(4)
227#define MTL_DEBUG_TWCSTS BIT(3)
228
229/* MTL debug: Tx FIFO Read Controller Status */
230#define MTL_DEBUG_TRCSTS_MASK GENMASK(2, 1)
231#define MTL_DEBUG_TRCSTS_SHIFT 1
232#define MTL_DEBUG_TRCSTS_IDLE 0
233#define MTL_DEBUG_TRCSTS_READ 1
234#define MTL_DEBUG_TRCSTS_TXW 2
235#define MTL_DEBUG_TRCSTS_WRITE 3
236#define MTL_DEBUG_TXPAUSED BIT(0)
237
238/* MTL debug: Rx FIFO fill level and Read Controller Status */
239#define MTL_DEBUG_RXFSTS_MASK GENMASK(5, 4)
240#define MTL_DEBUG_RXFSTS_SHIFT 4
241#define MTL_DEBUG_RXFSTS_EMPTY 0
242#define MTL_DEBUG_RXFSTS_BT 1
243#define MTL_DEBUG_RXFSTS_AT 2
244#define MTL_DEBUG_RXFSTS_FULL 3
245#define MTL_DEBUG_RRCSTS_MASK GENMASK(2, 1)
246#define MTL_DEBUG_RRCSTS_SHIFT 1
247#define MTL_DEBUG_RRCSTS_IDLE 0
248#define MTL_DEBUG_RRCSTS_RDATA 1
249#define MTL_DEBUG_RRCSTS_RSTAT 2
250#define MTL_DEBUG_RRCSTS_FLUSH 3
251#define MTL_DEBUG_RWCSTS BIT(0)
252
253extern const struct stmmac_dma_ops dwmac4_dma_ops;
254extern const struct stmmac_dma_ops dwmac410_dma_ops;
255#endif /* __DWMAC4_H__ */
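
The per-channel MTL register macros above are plain base-plus-stride arithmetic. A minimal user-space sketch (macro values copied from the header; illustrative only, not part of the patch) shows how channel 1's operation-mode registers resolve:

#include <stdio.h>

/* Values copied from the header above. */
#define MTL_CHAN_BASE_ADDR	0x00000d00
#define MTL_CHAN_BASE_OFFSET	0x40
#define MTL_CHANX_BASE_ADDR(x)	(MTL_CHAN_BASE_ADDR + ((x) * MTL_CHAN_BASE_OFFSET))
#define MTL_CHAN_TX_OP_MODE(x)	MTL_CHANX_BASE_ADDR(x)
#define MTL_CHAN_RX_OP_MODE(x)	(MTL_CHANX_BASE_ADDR(x) + 0x30)

int main(void)
{
	/* Channel 1: TX op mode lands at 0xd40, RX op mode at 0xd70. */
	printf("MTL_CHAN_TX_OP_MODE(1) = 0x%x\n", MTL_CHAN_TX_OP_MODE(1));
	printf("MTL_CHAN_RX_OP_MODE(1) = 0x%x\n", MTL_CHAN_RX_OP_MODE(1));
	return 0;
}
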
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
new file mode 100644
index 000000000000..4f7283d05588
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -0,0 +1,407 @@
1/*
2 * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
3 * DWC Ether MAC version 4.00 has been used for developing this code.
4 *
5 * This only implements the mac core functions for this chip.
6 *
7 * Copyright (C) 2015 STMicroelectronics Ltd
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms and conditions of the GNU General Public License,
11 * version 2, as published by the Free Software Foundation.
12 *
13 * Author: Alexandre Torgue <alexandre.torgue@st.com>
14 */
15
16#include <linux/crc32.h>
17#include <linux/slab.h>
18#include <linux/ethtool.h>
19#include <linux/io.h>
20#include "dwmac4.h"
21
22static void dwmac4_core_init(struct mac_device_info *hw, int mtu)
23{
24 void __iomem *ioaddr = hw->pcsr;
25 u32 value = readl(ioaddr + GMAC_CONFIG);
26
27 value |= GMAC_CORE_INIT;
28
29 if (mtu > 1500)
30 value |= GMAC_CONFIG_2K;
31 if (mtu > 2000)
32 value |= GMAC_CONFIG_JE;
33
34 writel(value, ioaddr + GMAC_CONFIG);
35
36 /* Mask GMAC interrupts */
37 writel(GMAC_INT_PMT_EN, ioaddr + GMAC_INT_EN);
38}
39
40static void dwmac4_dump_regs(struct mac_device_info *hw)
41{
42 void __iomem *ioaddr = hw->pcsr;
43 int i;
44
45 pr_debug("\tDWMAC4 regs (base addr = 0x%p)\n", ioaddr);
46
47 for (i = 0; i < GMAC_REG_NUM; i++) {
48 int offset = i * 4;
49
50 pr_debug("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
51 offset, readl(ioaddr + offset));
52 }
53}
54
55static int dwmac4_rx_ipc_enable(struct mac_device_info *hw)
56{
57 void __iomem *ioaddr = hw->pcsr;
58 u32 value = readl(ioaddr + GMAC_CONFIG);
59
60 if (hw->rx_csum)
61 value |= GMAC_CONFIG_IPC;
62 else
63 value &= ~GMAC_CONFIG_IPC;
64
65 writel(value, ioaddr + GMAC_CONFIG);
66
67 value = readl(ioaddr + GMAC_CONFIG);
68
69 return !!(value & GMAC_CONFIG_IPC);
70}
71
72static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
73{
74 void __iomem *ioaddr = hw->pcsr;
75 unsigned int pmt = 0;
76
77 if (mode & WAKE_MAGIC) {
78 pr_debug("GMAC: WOL Magic frame\n");
79 pmt |= power_down | magic_pkt_en;
80 }
81 if (mode & WAKE_UCAST) {
82 pr_debug("GMAC: WOL on global unicast\n");
83 pmt |= global_unicast;
84 }
85
86 writel(pmt, ioaddr + GMAC_PMT);
87}
88
89static void dwmac4_set_umac_addr(struct mac_device_info *hw,
90 unsigned char *addr, unsigned int reg_n)
91{
92 void __iomem *ioaddr = hw->pcsr;
93
94 stmmac_dwmac4_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
95 GMAC_ADDR_LOW(reg_n));
96}
97
98static void dwmac4_get_umac_addr(struct mac_device_info *hw,
99 unsigned char *addr, unsigned int reg_n)
100{
101 void __iomem *ioaddr = hw->pcsr;
102
103 stmmac_dwmac4_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
104 GMAC_ADDR_LOW(reg_n));
105}
106
107static void dwmac4_set_filter(struct mac_device_info *hw,
108 struct net_device *dev)
109{
110 void __iomem *ioaddr = (void __iomem *)dev->base_addr;
111 unsigned int value = 0;
112
113 if (dev->flags & IFF_PROMISC) {
114 value = GMAC_PACKET_FILTER_PR;
115 } else if ((dev->flags & IFF_ALLMULTI) ||
116 (netdev_mc_count(dev) > HASH_TABLE_SIZE)) {
117 /* Pass all multi */
118 value = GMAC_PACKET_FILTER_PM;
119		/* Set the 64 bits of the HASH tab. To be updated if a larger
120		 * hash table is used.
121 */
122 writel(0xffffffff, ioaddr + GMAC_HASH_TAB_0_31);
123 writel(0xffffffff, ioaddr + GMAC_HASH_TAB_32_63);
124 } else if (!netdev_mc_empty(dev)) {
125 u32 mc_filter[2];
126 struct netdev_hw_addr *ha;
127
128 /* Hash filter for multicast */
129 value = GMAC_PACKET_FILTER_HMC;
130
131 memset(mc_filter, 0, sizeof(mc_filter));
132 netdev_for_each_mc_addr(ha, dev) {
133 /* The upper 6 bits of the calculated CRC are used to
134 * index the content of the Hash Table Reg 0 and 1.
135 */
136 int bit_nr =
137 (bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26);
138 /* The most significant bit determines the register
139			 * to use, while the other 5 bits determine the bit
140 * within the selected register
141 */
142 mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1F));
143 }
144 writel(mc_filter[0], ioaddr + GMAC_HASH_TAB_0_31);
145 writel(mc_filter[1], ioaddr + GMAC_HASH_TAB_32_63);
146 }
147
148 /* Handle multiple unicast addresses */
149 if (netdev_uc_count(dev) > GMAC_MAX_PERFECT_ADDRESSES) {
150 /* Switch to promiscuous mode if more than 128 addrs
151 * are required
152 */
153 value |= GMAC_PACKET_FILTER_PR;
154 } else if (!netdev_uc_empty(dev)) {
155 int reg = 1;
156 struct netdev_hw_addr *ha;
157
158 netdev_for_each_uc_addr(ha, dev) {
159			dwmac4_set_umac_addr(hw, ha->addr, reg);
160 reg++;
161 }
162 }
163
164 writel(value, ioaddr + GMAC_PACKET_FILTER);
165}
166
167static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
168 unsigned int fc, unsigned int pause_time)
169{
170 void __iomem *ioaddr = hw->pcsr;
171 u32 channel = STMMAC_CHAN0; /* FIXME */
172 unsigned int flow = 0;
173
174 pr_debug("GMAC Flow-Control:\n");
175 if (fc & FLOW_RX) {
176 pr_debug("\tReceive Flow-Control ON\n");
177 flow |= GMAC_RX_FLOW_CTRL_RFE;
178 writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);
179 }
180 if (fc & FLOW_TX) {
181 pr_debug("\tTransmit Flow-Control ON\n");
182 flow |= GMAC_TX_FLOW_CTRL_TFE;
183 writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(channel));
184
185 if (duplex) {
186 pr_debug("\tduplex mode: PAUSE %d\n", pause_time);
187 flow |= (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);
188 writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(channel));
189 }
190 }
191}
192
193static void dwmac4_ctrl_ane(struct mac_device_info *hw, bool restart)
194{
195 void __iomem *ioaddr = hw->pcsr;
196
197 /* auto negotiation enable and External Loopback enable */
198 u32 value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE;
199
200 if (restart)
201 value |= GMAC_AN_CTRL_RAN;
202
203 writel(value, ioaddr + GMAC_AN_CTRL);
204}
205
206static void dwmac4_get_adv(struct mac_device_info *hw, struct rgmii_adv *adv)
207{
208 void __iomem *ioaddr = hw->pcsr;
209 u32 value = readl(ioaddr + GMAC_AN_ADV);
210
211 if (value & GMAC_AN_FD)
212 adv->duplex = DUPLEX_FULL;
213 if (value & GMAC_AN_HD)
214 adv->duplex |= DUPLEX_HALF;
215
216 adv->pause = (value & GMAC_AN_PSE_MASK) >> GMAC_AN_PSE_SHIFT;
217
218 value = readl(ioaddr + GMAC_AN_LPA);
219
220 if (value & GMAC_AN_FD)
221 adv->lp_duplex = DUPLEX_FULL;
222 if (value & GMAC_AN_HD)
223 adv->lp_duplex = DUPLEX_HALF;
224
225 adv->lp_pause = (value & GMAC_AN_PSE_MASK) >> GMAC_AN_PSE_SHIFT;
226}
227
228static int dwmac4_irq_status(struct mac_device_info *hw,
229 struct stmmac_extra_stats *x)
230{
231 void __iomem *ioaddr = hw->pcsr;
232 u32 mtl_int_qx_status;
233 u32 intr_status;
234 int ret = 0;
235
236 intr_status = readl(ioaddr + GMAC_INT_STATUS);
237
238 /* Not used events (e.g. MMC interrupts) are not handled. */
239 if ((intr_status & mmc_tx_irq))
240 x->mmc_tx_irq_n++;
241 if (unlikely(intr_status & mmc_rx_irq))
242 x->mmc_rx_irq_n++;
243 if (unlikely(intr_status & mmc_rx_csum_offload_irq))
244 x->mmc_rx_csum_offload_irq_n++;
245 /* Clear the PMT bits 5 and 6 by reading the PMT status reg */
246 if (unlikely(intr_status & pmt_irq)) {
247 readl(ioaddr + GMAC_PMT);
248 x->irq_receive_pmt_irq_n++;
249 }
250
251 if ((intr_status & pcs_ane_irq) || (intr_status & pcs_link_irq)) {
252 readl(ioaddr + GMAC_AN_STATUS);
253 x->irq_pcs_ane_n++;
254 }
255
256 mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);
257 /* Check MTL Interrupt: Currently only one queue is used: Q0. */
258 if (mtl_int_qx_status & MTL_INT_Q0) {
259 /* read Queue 0 Interrupt status */
260 u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(STMMAC_CHAN0));
261
262 if (status & MTL_RX_OVERFLOW_INT) {
263 /* clear Interrupt */
264 writel(status | MTL_RX_OVERFLOW_INT,
265 ioaddr + MTL_CHAN_INT_CTRL(STMMAC_CHAN0));
266 ret = CORE_IRQ_MTL_RX_OVERFLOW;
267 }
268 }
269
270 return ret;
271}
272
273static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x)
274{
275 u32 value;
276
277 /* Currently only channel 0 is supported */
278 value = readl(ioaddr + MTL_CHAN_TX_DEBUG(STMMAC_CHAN0));
279
280 if (value & MTL_DEBUG_TXSTSFSTS)
281 x->mtl_tx_status_fifo_full++;
282 if (value & MTL_DEBUG_TXFSTS)
283 x->mtl_tx_fifo_not_empty++;
284 if (value & MTL_DEBUG_TWCSTS)
285 x->mmtl_fifo_ctrl++;
286 if (value & MTL_DEBUG_TRCSTS_MASK) {
287 u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
288 >> MTL_DEBUG_TRCSTS_SHIFT;
289 if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
290 x->mtl_tx_fifo_read_ctrl_write++;
291 else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
292 x->mtl_tx_fifo_read_ctrl_wait++;
293 else if (trcsts == MTL_DEBUG_TRCSTS_READ)
294 x->mtl_tx_fifo_read_ctrl_read++;
295 else
296 x->mtl_tx_fifo_read_ctrl_idle++;
297 }
298 if (value & MTL_DEBUG_TXPAUSED)
299 x->mac_tx_in_pause++;
300
301 value = readl(ioaddr + MTL_CHAN_RX_DEBUG(STMMAC_CHAN0));
302
303 if (value & MTL_DEBUG_RXFSTS_MASK) {
304 u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
305			     >> MTL_DEBUG_RXFSTS_SHIFT;
306
307 if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
308 x->mtl_rx_fifo_fill_level_full++;
309 else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
310 x->mtl_rx_fifo_fill_above_thresh++;
311 else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
312 x->mtl_rx_fifo_fill_below_thresh++;
313 else
314 x->mtl_rx_fifo_fill_level_empty++;
315 }
316 if (value & MTL_DEBUG_RRCSTS_MASK) {
317 u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
318 MTL_DEBUG_RRCSTS_SHIFT;
319
320 if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
321 x->mtl_rx_fifo_read_ctrl_flush++;
322 else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
323 x->mtl_rx_fifo_read_ctrl_read_data++;
324 else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
325 x->mtl_rx_fifo_read_ctrl_status++;
326 else
327 x->mtl_rx_fifo_read_ctrl_idle++;
328 }
329 if (value & MTL_DEBUG_RWCSTS)
330 x->mtl_rx_fifo_ctrl_active++;
331
332 /* GMAC debug */
333 value = readl(ioaddr + GMAC_DEBUG);
334
335 if (value & GMAC_DEBUG_TFCSTS_MASK) {
336 u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK)
337 >> GMAC_DEBUG_TFCSTS_SHIFT;
338
339 if (tfcsts == GMAC_DEBUG_TFCSTS_XFER)
340 x->mac_tx_frame_ctrl_xfer++;
341 else if (tfcsts == GMAC_DEBUG_TFCSTS_GEN_PAUSE)
342 x->mac_tx_frame_ctrl_pause++;
343 else if (tfcsts == GMAC_DEBUG_TFCSTS_WAIT)
344 x->mac_tx_frame_ctrl_wait++;
345 else
346 x->mac_tx_frame_ctrl_idle++;
347 }
348 if (value & GMAC_DEBUG_TPESTS)
349 x->mac_gmii_tx_proto_engine++;
350 if (value & GMAC_DEBUG_RFCFCSTS_MASK)
351 x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK)
352 >> GMAC_DEBUG_RFCFCSTS_SHIFT;
353 if (value & GMAC_DEBUG_RPESTS)
354 x->mac_gmii_rx_proto_engine++;
355}
356
357static const struct stmmac_ops dwmac4_ops = {
358 .core_init = dwmac4_core_init,
359 .rx_ipc = dwmac4_rx_ipc_enable,
360 .dump_regs = dwmac4_dump_regs,
361 .host_irq_status = dwmac4_irq_status,
362 .flow_ctrl = dwmac4_flow_ctrl,
363 .pmt = dwmac4_pmt,
364 .set_umac_addr = dwmac4_set_umac_addr,
365 .get_umac_addr = dwmac4_get_umac_addr,
366 .ctrl_ane = dwmac4_ctrl_ane,
367 .get_adv = dwmac4_get_adv,
368 .debug = dwmac4_debug,
369 .set_filter = dwmac4_set_filter,
370};
371
372struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins,
373 int perfect_uc_entries, int *synopsys_id)
374{
375 struct mac_device_info *mac;
376 u32 hwid = readl(ioaddr + GMAC_VERSION);
377
378 mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
379 if (!mac)
380 return NULL;
381
382 mac->pcsr = ioaddr;
383 mac->multicast_filter_bins = mcbins;
384 mac->unicast_filter_entries = perfect_uc_entries;
385 mac->mcast_bits_log2 = 0;
386
387 if (mac->multicast_filter_bins)
388 mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
389
390 mac->mac = &dwmac4_ops;
391
392 mac->link.port = GMAC_CONFIG_PS;
393 mac->link.duplex = GMAC_CONFIG_DM;
394 mac->link.speed = GMAC_CONFIG_FES;
395 mac->mii.addr = GMAC_MDIO_ADDR;
396 mac->mii.data = GMAC_MDIO_DATA;
397
398 /* Get and dump the chip ID */
399 *synopsys_id = stmmac_get_synopsys_id(hwid);
400
401 if (*synopsys_id > DWMAC_CORE_4_00)
402 mac->dma = &dwmac410_dma_ops;
403 else
404 mac->dma = &dwmac4_dma_ops;
405
406 return mac;
407}
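
To make the hash filtering in dwmac4_set_filter() concrete: the upper six bits of the bit-reversed CRC pick one of 64 hash bits, the top bit of those six selects GMAC_HASH_TAB_0_31 versus GMAC_HASH_TAB_32_63, and the remaining five select the bit inside the chosen register. A stand-alone sketch of that mapping, assuming the 6-bit value has already been computed (the CRC and bit-reversal steps are not reimplemented here):

#include <stdio.h>
#include <stdint.h>

/* Map a precomputed 6-bit hash value (0..63) onto the two 32-bit
 * hash-table words, mirroring the mc_filter[] logic above. */
static void set_hash_bit(uint32_t mc_filter[2], unsigned int bit_nr)
{
	mc_filter[bit_nr >> 5] |= 1u << (bit_nr & 0x1f);
}

int main(void)
{
	uint32_t mc_filter[2] = { 0, 0 };

	set_hash_bit(mc_filter, 5);	/* lands in HASH_TAB_0_31, bit 5 */
	set_hash_bit(mc_filter, 37);	/* lands in HASH_TAB_32_63, bit 5 */

	printf("HASH_TAB_0_31  = 0x%08x\n", mc_filter[0]);
	printf("HASH_TAB_32_63 = 0x%08x\n", mc_filter[1]);
	return 0;
}
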
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
new file mode 100644
index 000000000000..d4952c7a836d
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -0,0 +1,396 @@
1/*
2 * This contains the functions to handle the descriptors for DesignWare databook
3 * 4.xx.
4 *
5 * Copyright (C) 2015 STMicroelectronics Ltd
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * Author: Alexandre Torgue <alexandre.torgue@st.com>
12 */
13
14#include <linux/stmmac.h>
15#include "common.h"
16#include "dwmac4_descs.h"
17
18static int dwmac4_wrback_get_tx_status(void *data, struct stmmac_extra_stats *x,
19 struct dma_desc *p,
20 void __iomem *ioaddr)
21{
22 struct net_device_stats *stats = (struct net_device_stats *)data;
23 unsigned int tdes3;
24 int ret = tx_done;
25
26 tdes3 = p->des3;
27
28 /* Get tx owner first */
29 if (unlikely(tdes3 & TDES3_OWN))
30 return tx_dma_own;
31
32 /* Verify tx error by looking at the last segment. */
33 if (likely(!(tdes3 & TDES3_LAST_DESCRIPTOR)))
34 return tx_not_ls;
35
36 if (unlikely(tdes3 & TDES3_ERROR_SUMMARY)) {
37 if (unlikely(tdes3 & TDES3_JABBER_TIMEOUT))
38 x->tx_jabber++;
39 if (unlikely(tdes3 & TDES3_PACKET_FLUSHED))
40 x->tx_frame_flushed++;
41 if (unlikely(tdes3 & TDES3_LOSS_CARRIER)) {
42 x->tx_losscarrier++;
43 stats->tx_carrier_errors++;
44 }
45 if (unlikely(tdes3 & TDES3_NO_CARRIER)) {
46 x->tx_carrier++;
47 stats->tx_carrier_errors++;
48 }
49 if (unlikely((tdes3 & TDES3_LATE_COLLISION) ||
50 (tdes3 & TDES3_EXCESSIVE_COLLISION)))
51 stats->collisions +=
52 (tdes3 & TDES3_COLLISION_COUNT_MASK)
53 >> TDES3_COLLISION_COUNT_SHIFT;
54
55 if (unlikely(tdes3 & TDES3_EXCESSIVE_DEFERRAL))
56 x->tx_deferred++;
57
58 if (unlikely(tdes3 & TDES3_UNDERFLOW_ERROR))
59 x->tx_underflow++;
60
61 if (unlikely(tdes3 & TDES3_IP_HDR_ERROR))
62 x->tx_ip_header_error++;
63
64 if (unlikely(tdes3 & TDES3_PAYLOAD_ERROR))
65 x->tx_payload_error++;
66
67 ret = tx_err;
68 }
69
70 if (unlikely(tdes3 & TDES3_DEFERRED))
71 x->tx_deferred++;
72
73 return ret;
74}
75
76static int dwmac4_wrback_get_rx_status(void *data, struct stmmac_extra_stats *x,
77 struct dma_desc *p)
78{
79 struct net_device_stats *stats = (struct net_device_stats *)data;
80 unsigned int rdes1 = p->des1;
81 unsigned int rdes2 = p->des2;
82 unsigned int rdes3 = p->des3;
83 int message_type;
84 int ret = good_frame;
85
86 if (unlikely(rdes3 & RDES3_OWN))
87 return dma_own;
88
89 /* Verify rx error by looking at the last segment. */
90 if (likely(!(rdes3 & RDES3_LAST_DESCRIPTOR)))
91 return discard_frame;
92
93 if (unlikely(rdes3 & RDES3_ERROR_SUMMARY)) {
94 if (unlikely(rdes3 & RDES3_GIANT_PACKET))
95 stats->rx_length_errors++;
96 if (unlikely(rdes3 & RDES3_OVERFLOW_ERROR))
97 x->rx_gmac_overflow++;
98
99 if (unlikely(rdes3 & RDES3_RECEIVE_WATCHDOG))
100 x->rx_watchdog++;
101
102 if (unlikely(rdes3 & RDES3_RECEIVE_ERROR))
103 x->rx_mii++;
104
105 if (unlikely(rdes3 & RDES3_CRC_ERROR)) {
106 x->rx_crc++;
107 stats->rx_crc_errors++;
108 }
109
110 if (unlikely(rdes3 & RDES3_DRIBBLE_ERROR))
111 x->dribbling_bit++;
112
113 ret = discard_frame;
114 }
115
116 message_type = (rdes1 & ERDES4_MSG_TYPE_MASK) >> 8;
117
118 if (rdes1 & RDES1_IP_HDR_ERROR)
119 x->ip_hdr_err++;
120 if (rdes1 & RDES1_IP_CSUM_BYPASSED)
121 x->ip_csum_bypassed++;
122 if (rdes1 & RDES1_IPV4_HEADER)
123 x->ipv4_pkt_rcvd++;
124 if (rdes1 & RDES1_IPV6_HEADER)
125 x->ipv6_pkt_rcvd++;
126 if (message_type == RDES_EXT_SYNC)
127 x->rx_msg_type_sync++;
128 else if (message_type == RDES_EXT_FOLLOW_UP)
129 x->rx_msg_type_follow_up++;
130 else if (message_type == RDES_EXT_DELAY_REQ)
131 x->rx_msg_type_delay_req++;
132 else if (message_type == RDES_EXT_DELAY_RESP)
133 x->rx_msg_type_delay_resp++;
134 else if (message_type == RDES_EXT_PDELAY_REQ)
135 x->rx_msg_type_pdelay_req++;
136 else if (message_type == RDES_EXT_PDELAY_RESP)
137 x->rx_msg_type_pdelay_resp++;
138 else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP)
139 x->rx_msg_type_pdelay_follow_up++;
140 else
141 x->rx_msg_type_ext_no_ptp++;
142
143 if (rdes1 & RDES1_PTP_PACKET_TYPE)
144 x->ptp_frame_type++;
145 if (rdes1 & RDES1_PTP_VER)
146 x->ptp_ver++;
147 if (rdes1 & RDES1_TIMESTAMP_DROPPED)
148 x->timestamp_dropped++;
149
150 if (unlikely(rdes2 & RDES2_SA_FILTER_FAIL)) {
151 x->sa_rx_filter_fail++;
152 ret = discard_frame;
153 }
154 if (unlikely(rdes2 & RDES2_DA_FILTER_FAIL)) {
155 x->da_rx_filter_fail++;
156 ret = discard_frame;
157 }
158
159 if (rdes2 & RDES2_L3_FILTER_MATCH)
160 x->l3_filter_match++;
161 if (rdes2 & RDES2_L4_FILTER_MATCH)
162 x->l4_filter_match++;
163 if ((rdes2 & RDES2_L3_L4_FILT_NB_MATCH_MASK)
164 >> RDES2_L3_L4_FILT_NB_MATCH_SHIFT)
165 x->l3_l4_filter_no_match++;
166
167 return ret;
168}
169
170static int dwmac4_rd_get_tx_len(struct dma_desc *p)
171{
172 return (p->des2 & TDES2_BUFFER1_SIZE_MASK);
173}
174
175static int dwmac4_get_tx_owner(struct dma_desc *p)
176{
177 return (p->des3 & TDES3_OWN) >> TDES3_OWN_SHIFT;
178}
179
180static void dwmac4_set_tx_owner(struct dma_desc *p)
181{
182 p->des3 |= TDES3_OWN;
183}
184
185static void dwmac4_set_rx_owner(struct dma_desc *p)
186{
187 p->des3 |= RDES3_OWN;
188}
189
190static int dwmac4_get_tx_ls(struct dma_desc *p)
191{
192 return (p->des3 & TDES3_LAST_DESCRIPTOR) >> TDES3_LAST_DESCRIPTOR_SHIFT;
193}
194
195static int dwmac4_wrback_get_rx_frame_len(struct dma_desc *p, int rx_coe)
196{
197 return (p->des3 & RDES3_PACKET_SIZE_MASK);
198}
199
200static void dwmac4_rd_enable_tx_timestamp(struct dma_desc *p)
201{
202 p->des2 |= TDES2_TIMESTAMP_ENABLE;
203}
204
205static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p)
206{
207 return (p->des3 & TDES3_TIMESTAMP_STATUS)
208 >> TDES3_TIMESTAMP_STATUS_SHIFT;
209}
210
211/* NOTE: for RX, the CTX bit has to be checked before calling this;
212 * ideally there should be a specific function for TX and another one for RX.
213 */
214static u64 dwmac4_wrback_get_timestamp(void *desc, u32 ats)
215{
216 struct dma_desc *p = (struct dma_desc *)desc;
217 u64 ns;
218
219 ns = p->des0;
220 /* convert high/sec time stamp value to nanosecond */
221 ns += p->des1 * 1000000000ULL;
222
223 return ns;
224}
225
226static int dwmac4_context_get_rx_timestamp_status(void *desc, u32 ats)
227{
228 struct dma_desc *p = (struct dma_desc *)desc;
229
230 return (p->des1 & RDES1_TIMESTAMP_AVAILABLE)
231 >> RDES1_TIMESTAMP_AVAILABLE_SHIFT;
232}
233
234static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
235 int mode, int end)
236{
237 p->des3 = RDES3_OWN | RDES3_BUFFER1_VALID_ADDR;
238
239 if (!disable_rx_ic)
240 p->des3 |= RDES3_INT_ON_COMPLETION_EN;
241}
242
243static void dwmac4_rd_init_tx_desc(struct dma_desc *p, int mode, int end)
244{
245 p->des0 = 0;
246 p->des1 = 0;
247 p->des2 = 0;
248 p->des3 = 0;
249}
250
251static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
252 bool csum_flag, int mode, bool tx_own,
253 bool ls)
254{
255 unsigned int tdes3 = p->des3;
256
257 if (unlikely(len > BUF_SIZE_16KiB)) {
258 p->des2 |= (((len - BUF_SIZE_16KiB) <<
259 TDES2_BUFFER2_SIZE_MASK_SHIFT)
260 & TDES2_BUFFER2_SIZE_MASK)
261 | (BUF_SIZE_16KiB & TDES2_BUFFER1_SIZE_MASK);
262 } else {
263 p->des2 |= (len & TDES2_BUFFER1_SIZE_MASK);
264 }
265
266 if (is_fs)
267 tdes3 |= TDES3_FIRST_DESCRIPTOR;
268 else
269 tdes3 &= ~TDES3_FIRST_DESCRIPTOR;
270
271 if (likely(csum_flag))
272 tdes3 |= (TX_CIC_FULL << TDES3_CHECKSUM_INSERTION_SHIFT);
273 else
274 tdes3 &= ~(TX_CIC_FULL << TDES3_CHECKSUM_INSERTION_SHIFT);
275
276 if (ls)
277 tdes3 |= TDES3_LAST_DESCRIPTOR;
278 else
279 tdes3 &= ~TDES3_LAST_DESCRIPTOR;
280
281 /* Finally set the OWN bit. Later the DMA will start! */
282 if (tx_own)
283 tdes3 |= TDES3_OWN;
284
285 if (is_fs & tx_own)
286 /* When the own bit, for the first frame, has to be set, all
287		 * descriptors for the same frame have to be set before, to
288		 * avoid a race condition.
289 */
290 wmb();
291
292 p->des3 = tdes3;
293}
294
295static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
296 int len1, int len2, bool tx_own,
297 bool ls, unsigned int tcphdrlen,
298 unsigned int tcppayloadlen)
299{
300 unsigned int tdes3 = p->des3;
301
302 if (len1)
303 p->des2 |= (len1 & TDES2_BUFFER1_SIZE_MASK);
304
305 if (len2)
306 p->des2 |= (len2 << TDES2_BUFFER2_SIZE_MASK_SHIFT)
307 & TDES2_BUFFER2_SIZE_MASK;
308
309 if (is_fs) {
310 tdes3 |= TDES3_FIRST_DESCRIPTOR |
311 TDES3_TCP_SEGMENTATION_ENABLE |
312 ((tcphdrlen << TDES3_HDR_LEN_SHIFT) &
313 TDES3_SLOT_NUMBER_MASK) |
314 ((tcppayloadlen & TDES3_TCP_PKT_PAYLOAD_MASK));
315 } else {
316 tdes3 &= ~TDES3_FIRST_DESCRIPTOR;
317 }
318
319 if (ls)
320 tdes3 |= TDES3_LAST_DESCRIPTOR;
321 else
322 tdes3 &= ~TDES3_LAST_DESCRIPTOR;
323
324 /* Finally set the OWN bit. Later the DMA will start! */
325 if (tx_own)
326 tdes3 |= TDES3_OWN;
327
328 if (is_fs & tx_own)
329 /* When the own bit, for the first frame, has to be set, all
330		 * descriptors for the same frame have to be set before, to
331		 * avoid a race condition.
332 */
333 wmb();
334
335 p->des3 = tdes3;
336}
337
338static void dwmac4_release_tx_desc(struct dma_desc *p, int mode)
339{
340 p->des2 = 0;
341 p->des3 = 0;
342}
343
344static void dwmac4_rd_set_tx_ic(struct dma_desc *p)
345{
346 p->des2 |= TDES2_INTERRUPT_ON_COMPLETION;
347}
348
349static void dwmac4_display_ring(void *head, unsigned int size, bool rx)
350{
351 struct dma_desc *p = (struct dma_desc *)head;
352 int i;
353
354 pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");
355
356 for (i = 0; i < size; i++) {
357 if (p->des0)
358 pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
359 i, (unsigned int)virt_to_phys(p),
360 p->des0, p->des1, p->des2, p->des3);
361 p++;
362 }
363}
364
365static void dwmac4_set_mss_ctxt(struct dma_desc *p, unsigned int mss)
366{
367 p->des0 = 0;
368 p->des1 = 0;
369 p->des2 = mss;
370 p->des3 = TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV;
371}
372
373const struct stmmac_desc_ops dwmac4_desc_ops = {
374 .tx_status = dwmac4_wrback_get_tx_status,
375 .rx_status = dwmac4_wrback_get_rx_status,
376 .get_tx_len = dwmac4_rd_get_tx_len,
377 .get_tx_owner = dwmac4_get_tx_owner,
378 .set_tx_owner = dwmac4_set_tx_owner,
379 .set_rx_owner = dwmac4_set_rx_owner,
380 .get_tx_ls = dwmac4_get_tx_ls,
381 .get_rx_frame_len = dwmac4_wrback_get_rx_frame_len,
382 .enable_tx_timestamp = dwmac4_rd_enable_tx_timestamp,
383 .get_tx_timestamp_status = dwmac4_wrback_get_tx_timestamp_status,
384 .get_timestamp = dwmac4_wrback_get_timestamp,
385 .get_rx_timestamp_status = dwmac4_context_get_rx_timestamp_status,
386 .set_tx_ic = dwmac4_rd_set_tx_ic,
387 .prepare_tx_desc = dwmac4_rd_prepare_tx_desc,
388 .prepare_tso_tx_desc = dwmac4_rd_prepare_tso_tx_desc,
389 .release_tx_desc = dwmac4_release_tx_desc,
390 .init_rx_desc = dwmac4_rd_init_rx_desc,
391 .init_tx_desc = dwmac4_rd_init_tx_desc,
392 .display_ring = dwmac4_display_ring,
393 .set_mss = dwmac4_set_mss_ctxt,
394};
395
396const struct stmmac_mode_ops dwmac4_ring_mode_ops = { };
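
dwmac4_wrback_get_timestamp() above rebuilds the hardware timestamp as seconds (des1) scaled to nanoseconds plus the nanosecond field (des0). A worked, stand-alone example with hypothetical descriptor values (illustrative only):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical write-back values: des0 = nanoseconds, des1 = seconds. */
	uint32_t des0 = 500000000;	/* 0.5 s expressed in ns */
	uint32_t des1 = 42;		/* 42 s */

	uint64_t ns = des0 + (uint64_t)des1 * 1000000000ULL;

	printf("timestamp = %llu ns (42.5 s)\n", (unsigned long long)ns);
	return 0;
}
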
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
new file mode 100644
index 000000000000..0902a2edeaa9
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
@@ -0,0 +1,129 @@
1/*
2 * Header File to describe the DMA descriptors and related definitions specific
3 * for DesignWare databook 4.xx.
4 *
5 * Copyright (C) 2015 STMicroelectronics Ltd
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * Author: Alexandre Torgue <alexandre.torgue@st.com>
12 */
13
14#ifndef __DWMAC4_DESCS_H__
15#define __DWMAC4_DESCS_H__
16
17#include <linux/bitops.h>
18
19/* Normal transmit descriptor defines (without split feature) */
20
21/* TDES2 (read format) */
22#define TDES2_BUFFER1_SIZE_MASK GENMASK(13, 0)
23#define TDES2_VLAN_TAG_MASK GENMASK(15, 14)
24#define TDES2_BUFFER2_SIZE_MASK GENMASK(29, 16)
25#define TDES2_BUFFER2_SIZE_MASK_SHIFT 16
26#define TDES2_TIMESTAMP_ENABLE BIT(30)
27#define TDES2_INTERRUPT_ON_COMPLETION BIT(31)
28
29/* TDES3 (read format) */
30#define TDES3_PACKET_SIZE_MASK GENMASK(14, 0)
31#define TDES3_CHECKSUM_INSERTION_MASK GENMASK(17, 16)
32#define TDES3_CHECKSUM_INSERTION_SHIFT 16
33#define TDES3_TCP_PKT_PAYLOAD_MASK GENMASK(17, 0)
34#define TDES3_TCP_SEGMENTATION_ENABLE BIT(18)
35#define TDES3_HDR_LEN_SHIFT 19
36#define TDES3_SLOT_NUMBER_MASK GENMASK(22, 19)
37#define TDES3_SA_INSERT_CTRL_MASK GENMASK(25, 23)
38#define TDES3_CRC_PAD_CTRL_MASK GENMASK(27, 26)
39
40/* TDES3 (write back format) */
41#define TDES3_IP_HDR_ERROR BIT(0)
42#define TDES3_DEFERRED BIT(1)
43#define TDES3_UNDERFLOW_ERROR BIT(2)
44#define TDES3_EXCESSIVE_DEFERRAL BIT(3)
45#define TDES3_COLLISION_COUNT_MASK GENMASK(7, 4)
46#define TDES3_COLLISION_COUNT_SHIFT 4
47#define TDES3_EXCESSIVE_COLLISION BIT(8)
48#define TDES3_LATE_COLLISION BIT(9)
49#define TDES3_NO_CARRIER BIT(10)
50#define TDES3_LOSS_CARRIER BIT(11)
51#define TDES3_PAYLOAD_ERROR BIT(12)
52#define TDES3_PACKET_FLUSHED BIT(13)
53#define TDES3_JABBER_TIMEOUT BIT(14)
54#define TDES3_ERROR_SUMMARY BIT(15)
55#define TDES3_TIMESTAMP_STATUS BIT(17)
56#define TDES3_TIMESTAMP_STATUS_SHIFT 17
57
58/* TDES3 context */
59#define TDES3_CTXT_TCMSSV BIT(26)
60
61/* TDES3 Common */
62#define TDES3_LAST_DESCRIPTOR BIT(28)
63#define TDES3_LAST_DESCRIPTOR_SHIFT 28
64#define TDES3_FIRST_DESCRIPTOR BIT(29)
65#define TDES3_CONTEXT_TYPE BIT(30)
66
67/* TDES3 used for both formats (read and write back) */
68#define TDES3_OWN BIT(31)
69#define TDES3_OWN_SHIFT 31
70
71/* Normal receive descriptor defines (without split feature) */
72
73/* RDES0 (write back format) */
74#define RDES0_VLAN_TAG_MASK GENMASK(15, 0)
75
76/* RDES1 (write back format) */
77#define RDES1_IP_PAYLOAD_TYPE_MASK GENMASK(2, 0)
78#define RDES1_IP_HDR_ERROR BIT(3)
79#define RDES1_IPV4_HEADER BIT(4)
80#define RDES1_IPV6_HEADER BIT(5)
81#define RDES1_IP_CSUM_BYPASSED BIT(6)
82#define RDES1_IP_CSUM_ERROR BIT(7)
83#define RDES1_PTP_MSG_TYPE_MASK GENMASK(11, 8)
84#define RDES1_PTP_PACKET_TYPE BIT(12)
85#define RDES1_PTP_VER BIT(13)
86#define RDES1_TIMESTAMP_AVAILABLE BIT(14)
87#define RDES1_TIMESTAMP_AVAILABLE_SHIFT 14
88#define RDES1_TIMESTAMP_DROPPED BIT(15)
89#define RDES1_IP_TYPE1_CSUM_MASK GENMASK(31, 16)
90
91/* RDES2 (write back format) */
92#define RDES2_L3_L4_HEADER_SIZE_MASK GENMASK(9, 0)
93#define RDES2_VLAN_FILTER_STATUS BIT(15)
94#define RDES2_SA_FILTER_FAIL BIT(16)
95#define RDES2_DA_FILTER_FAIL BIT(17)
96#define RDES2_HASH_FILTER_STATUS BIT(18)
97#define RDES2_MAC_ADDR_MATCH_MASK GENMASK(26, 19)
98#define RDES2_HASH_VALUE_MATCH_MASK GENMASK(26, 19)
99#define RDES2_L3_FILTER_MATCH BIT(27)
100#define RDES2_L4_FILTER_MATCH BIT(28)
101#define RDES2_L3_L4_FILT_NB_MATCH_MASK GENMASK(27, 26)
102#define RDES2_L3_L4_FILT_NB_MATCH_SHIFT 26
103
104/* RDES3 (write back format) */
105#define RDES3_PACKET_SIZE_MASK GENMASK(14, 0)
106#define RDES3_ERROR_SUMMARY BIT(15)
107#define RDES3_PACKET_LEN_TYPE_MASK GENMASK(18, 16)
108#define RDES3_DRIBBLE_ERROR BIT(19)
109#define RDES3_RECEIVE_ERROR BIT(20)
110#define RDES3_OVERFLOW_ERROR BIT(21)
111#define RDES3_RECEIVE_WATCHDOG BIT(22)
112#define RDES3_GIANT_PACKET BIT(23)
113#define RDES3_CRC_ERROR BIT(24)
114#define RDES3_RDES0_VALID BIT(25)
115#define RDES3_RDES1_VALID BIT(26)
116#define RDES3_RDES2_VALID BIT(27)
117#define RDES3_LAST_DESCRIPTOR BIT(28)
118#define RDES3_FIRST_DESCRIPTOR BIT(29)
119#define RDES3_CONTEXT_DESCRIPTOR BIT(30)
120
121/* RDES3 (read format) */
122#define RDES3_BUFFER1_VALID_ADDR BIT(24)
123#define RDES3_BUFFER2_VALID_ADDR BIT(25)
124#define RDES3_INT_ON_COMPLETION_EN BIT(30)
125
126/* RDES3 used for both formats (read and write back) */
127#define RDES3_OWN BIT(31)
128
129#endif /* __DWMAC4_DESCS_H__ */
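
As a quick illustration of how the write-back RDES3 masks above are meant to be used, here is a stand-alone sketch that classifies a hypothetical descriptor word; BIT()/GENMASK() are redefined locally as stand-ins for <linux/bitops.h>, so this is not the driver's own code path:

#include <stdio.h>
#include <stdint.h>

/* Local stand-ins for the kernel's BIT()/GENMASK() helpers (32-bit). */
#define BIT(n)		(1u << (n))
#define GENMASK(h, l)	(((~0u) >> (31 - (h))) & ~((1u << (l)) - 1))

#define RDES3_PACKET_SIZE_MASK	GENMASK(14, 0)
#define RDES3_ERROR_SUMMARY	BIT(15)
#define RDES3_LAST_DESCRIPTOR	BIT(28)
#define RDES3_OWN		BIT(31)

int main(void)
{
	/* Hypothetical write-back word: LD set, no errors, 1514-byte frame. */
	uint32_t rdes3 = RDES3_LAST_DESCRIPTOR | 1514;

	if (!(rdes3 & RDES3_OWN) && (rdes3 & RDES3_LAST_DESCRIPTOR) &&
	    !(rdes3 & RDES3_ERROR_SUMMARY))
		printf("good frame, %u bytes\n",
		       (unsigned int)(rdes3 & RDES3_PACKET_SIZE_MASK));
	return 0;
}
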
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
new file mode 100644
index 000000000000..116151cd6a95
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -0,0 +1,354 @@
1/*
2 * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
3 * DWC Ether MAC version 4.xx has been used for developing this code.
4 *
5 * This contains the functions to handle the dma.
6 *
7 * Copyright (C) 2015 STMicroelectronics Ltd
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms and conditions of the GNU General Public License,
11 * version 2, as published by the Free Software Foundation.
12 *
13 * Author: Alexandre Torgue <alexandre.torgue@st.com>
14 */
15
16#include <linux/io.h>
17#include "dwmac4.h"
18#include "dwmac4_dma.h"
19
20static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
21{
22 u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
23 int i;
24
25 pr_info("dwmac4: Master AXI performs %s burst length\n",
26 (value & DMA_SYS_BUS_FB) ? "fixed" : "any");
27
28 if (axi->axi_lpi_en)
29 value |= DMA_AXI_EN_LPI;
30 if (axi->axi_xit_frm)
31 value |= DMA_AXI_LPI_XIT_FRM;
32
33 value |= (axi->axi_wr_osr_lmt & DMA_AXI_OSR_MAX) <<
34 DMA_AXI_WR_OSR_LMT_SHIFT;
35
36 value |= (axi->axi_rd_osr_lmt & DMA_AXI_OSR_MAX) <<
37 DMA_AXI_RD_OSR_LMT_SHIFT;
38
39 /* Depending on the UNDEF bit the Master AXI will perform any burst
40 * length according to the BLEN programmed (by default all BLEN are
41 * set).
42 */
43 for (i = 0; i < AXI_BLEN; i++) {
44 switch (axi->axi_blen[i]) {
45 case 256:
46 value |= DMA_AXI_BLEN256;
47 break;
48 case 128:
49 value |= DMA_AXI_BLEN128;
50 break;
51 case 64:
52 value |= DMA_AXI_BLEN64;
53 break;
54 case 32:
55 value |= DMA_AXI_BLEN32;
56 break;
57 case 16:
58 value |= DMA_AXI_BLEN16;
59 break;
60 case 8:
61 value |= DMA_AXI_BLEN8;
62 break;
63 case 4:
64 value |= DMA_AXI_BLEN4;
65 break;
66 }
67 }
68
69 writel(value, ioaddr + DMA_SYS_BUS_MODE);
70}
71
72static void dwmac4_dma_init_channel(void __iomem *ioaddr, int pbl,
73 u32 dma_tx_phy, u32 dma_rx_phy,
74 u32 channel)
75{
76 u32 value;
77
78	/* Set the PBL for each channel. Currently we apply the same configuration
79	 * to each channel.
80 */
81 value = readl(ioaddr + DMA_CHAN_CONTROL(channel));
82 value = value | DMA_BUS_MODE_PBL;
83 writel(value, ioaddr + DMA_CHAN_CONTROL(channel));
84
85 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(channel));
86 value = value | (pbl << DMA_BUS_MODE_PBL_SHIFT);
87 writel(value, ioaddr + DMA_CHAN_TX_CONTROL(channel));
88
89 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(channel));
90 value = value | (pbl << DMA_BUS_MODE_RPBL_SHIFT);
91 writel(value, ioaddr + DMA_CHAN_RX_CONTROL(channel));
92
93 /* Mask interrupts by writing to CSR7 */
94 writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr + DMA_CHAN_INTR_ENA(channel));
95
96 writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(channel));
97 writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(channel));
98}
99
100static void dwmac4_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
101 int aal, u32 dma_tx, u32 dma_rx, int atds)
102{
103 u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
104 int i;
105
106 /* Set the Fixed burst mode */
107 if (fb)
108 value |= DMA_SYS_BUS_FB;
109
110 /* Mixed Burst has no effect when fb is set */
111 if (mb)
112 value |= DMA_SYS_BUS_MB;
113
114 if (aal)
115 value |= DMA_SYS_BUS_AAL;
116
117 writel(value, ioaddr + DMA_SYS_BUS_MODE);
118
119 for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
120 dwmac4_dma_init_channel(ioaddr, pbl, dma_tx, dma_rx, i);
121}
122
123static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel)
124{
125 pr_debug(" Channel %d\n", channel);
126 pr_debug("\tDMA_CHAN_CONTROL, offset: 0x%x, val: 0x%x\n", 0,
127 readl(ioaddr + DMA_CHAN_CONTROL(channel)));
128 pr_debug("\tDMA_CHAN_TX_CONTROL, offset: 0x%x, val: 0x%x\n", 0x4,
129 readl(ioaddr + DMA_CHAN_TX_CONTROL(channel)));
130 pr_debug("\tDMA_CHAN_RX_CONTROL, offset: 0x%x, val: 0x%x\n", 0x8,
131 readl(ioaddr + DMA_CHAN_RX_CONTROL(channel)));
132 pr_debug("\tDMA_CHAN_TX_BASE_ADDR, offset: 0x%x, val: 0x%x\n", 0x14,
133 readl(ioaddr + DMA_CHAN_TX_BASE_ADDR(channel)));
134 pr_debug("\tDMA_CHAN_RX_BASE_ADDR, offset: 0x%x, val: 0x%x\n", 0x1c,
135 readl(ioaddr + DMA_CHAN_RX_BASE_ADDR(channel)));
136 pr_debug("\tDMA_CHAN_TX_END_ADDR, offset: 0x%x, val: 0x%x\n", 0x20,
137 readl(ioaddr + DMA_CHAN_TX_END_ADDR(channel)));
138 pr_debug("\tDMA_CHAN_RX_END_ADDR, offset: 0x%x, val: 0x%x\n", 0x28,
139 readl(ioaddr + DMA_CHAN_RX_END_ADDR(channel)));
140 pr_debug("\tDMA_CHAN_TX_RING_LEN, offset: 0x%x, val: 0x%x\n", 0x2c,
141 readl(ioaddr + DMA_CHAN_TX_RING_LEN(channel)));
142 pr_debug("\tDMA_CHAN_RX_RING_LEN, offset: 0x%x, val: 0x%x\n", 0x30,
143 readl(ioaddr + DMA_CHAN_RX_RING_LEN(channel)));
144 pr_debug("\tDMA_CHAN_INTR_ENA, offset: 0x%x, val: 0x%x\n", 0x34,
145 readl(ioaddr + DMA_CHAN_INTR_ENA(channel)));
146 pr_debug("\tDMA_CHAN_RX_WATCHDOG, offset: 0x%x, val: 0x%x\n", 0x38,
147 readl(ioaddr + DMA_CHAN_RX_WATCHDOG(channel)));
148 pr_debug("\tDMA_CHAN_SLOT_CTRL_STATUS, offset: 0x%x, val: 0x%x\n", 0x3c,
149 readl(ioaddr + DMA_CHAN_SLOT_CTRL_STATUS(channel)));
150 pr_debug("\tDMA_CHAN_CUR_TX_DESC, offset: 0x%x, val: 0x%x\n", 0x44,
151 readl(ioaddr + DMA_CHAN_CUR_TX_DESC(channel)));
152 pr_debug("\tDMA_CHAN_CUR_RX_DESC, offset: 0x%x, val: 0x%x\n", 0x4c,
153 readl(ioaddr + DMA_CHAN_CUR_RX_DESC(channel)));
154 pr_debug("\tDMA_CHAN_CUR_TX_BUF_ADDR, offset: 0x%x, val: 0x%x\n", 0x54,
155 readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR(channel)));
156 pr_debug("\tDMA_CHAN_CUR_RX_BUF_ADDR, offset: 0x%x, val: 0x%x\n", 0x5c,
157 readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR(channel)));
158 pr_debug("\tDMA_CHAN_STATUS, offset: 0x%x, val: 0x%x\n", 0x60,
159 readl(ioaddr + DMA_CHAN_STATUS(channel)));
160}
161
162static void dwmac4_dump_dma_regs(void __iomem *ioaddr)
163{
164 int i;
165
166 pr_debug(" GMAC4 DMA registers\n");
167
168 for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
169 _dwmac4_dump_dma_regs(ioaddr, i);
170}
171
172static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt)
173{
174 int i;
175
176 for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
177 writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(i));
178}
179
180static void dwmac4_dma_chan_op_mode(void __iomem *ioaddr, int txmode,
181 int rxmode, u32 channel)
182{
183 u32 mtl_tx_op, mtl_rx_op, mtl_rx_int;
184
185	/* The following code is only done for channel 0; other channels are
186	 * not yet supported.
187 */
188 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
189
190 if (txmode == SF_DMA_MODE) {
191 pr_debug("GMAC: enable TX store and forward mode\n");
192 /* Transmit COE type 2 cannot be done in cut-through mode. */
193 mtl_tx_op |= MTL_OP_MODE_TSF;
194 } else {
195 pr_debug("GMAC: disabling TX SF (threshold %d)\n", txmode);
196 mtl_tx_op &= ~MTL_OP_MODE_TSF;
197 mtl_tx_op &= MTL_OP_MODE_TTC_MASK;
198 /* Set the transmit threshold */
199 if (txmode <= 32)
200 mtl_tx_op |= MTL_OP_MODE_TTC_32;
201 else if (txmode <= 64)
202 mtl_tx_op |= MTL_OP_MODE_TTC_64;
203 else if (txmode <= 96)
204 mtl_tx_op |= MTL_OP_MODE_TTC_96;
205 else if (txmode <= 128)
206 mtl_tx_op |= MTL_OP_MODE_TTC_128;
207 else if (txmode <= 192)
208 mtl_tx_op |= MTL_OP_MODE_TTC_192;
209 else if (txmode <= 256)
210 mtl_tx_op |= MTL_OP_MODE_TTC_256;
211 else if (txmode <= 384)
212 mtl_tx_op |= MTL_OP_MODE_TTC_384;
213 else
214 mtl_tx_op |= MTL_OP_MODE_TTC_512;
215 }
216
217 writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
218
219 mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));
220
221 if (rxmode == SF_DMA_MODE) {
222 pr_debug("GMAC: enable RX store and forward mode\n");
223 mtl_rx_op |= MTL_OP_MODE_RSF;
224 } else {
225 pr_debug("GMAC: disable RX SF mode (threshold %d)\n", rxmode);
226 mtl_rx_op &= ~MTL_OP_MODE_RSF;
227 mtl_rx_op &= MTL_OP_MODE_RTC_MASK;
228 if (rxmode <= 32)
229 mtl_rx_op |= MTL_OP_MODE_RTC_32;
230 else if (rxmode <= 64)
231 mtl_rx_op |= MTL_OP_MODE_RTC_64;
232 else if (rxmode <= 96)
233 mtl_rx_op |= MTL_OP_MODE_RTC_96;
234 else
235 mtl_rx_op |= MTL_OP_MODE_RTC_128;
236 }
237
238 writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel));
239
240	/* Enable the MTL RX overflow interrupt */
241 mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel));
242 writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN,
243 ioaddr + MTL_CHAN_INT_CTRL(channel));
244}
245
246static void dwmac4_dma_operation_mode(void __iomem *ioaddr, int txmode,
247 int rxmode, int rxfifosz)
248{
249 /* Only Channel 0 is actually configured and used */
250 dwmac4_dma_chan_op_mode(ioaddr, txmode, rxmode, 0);
251}
252
253static void dwmac4_get_hw_feature(void __iomem *ioaddr,
254 struct dma_features *dma_cap)
255{
256 u32 hw_cap = readl(ioaddr + GMAC_HW_FEATURE0);
257
258 /* MAC HW feature0 */
259 dma_cap->mbps_10_100 = (hw_cap & GMAC_HW_FEAT_MIISEL);
260 dma_cap->mbps_1000 = (hw_cap & GMAC_HW_FEAT_GMIISEL) >> 1;
261 dma_cap->half_duplex = (hw_cap & GMAC_HW_FEAT_HDSEL) >> 2;
262 dma_cap->hash_filter = (hw_cap & GMAC_HW_FEAT_VLHASH) >> 4;
263 dma_cap->multi_addr = (hw_cap & GMAC_HW_FEAT_ADDMAC) >> 18;
264 dma_cap->pcs = (hw_cap & GMAC_HW_FEAT_PCSSEL) >> 3;
265 dma_cap->sma_mdio = (hw_cap & GMAC_HW_FEAT_SMASEL) >> 5;
266 dma_cap->pmt_remote_wake_up = (hw_cap & GMAC_HW_FEAT_RWKSEL) >> 6;
267 dma_cap->pmt_magic_frame = (hw_cap & GMAC_HW_FEAT_MGKSEL) >> 7;
268 /* MMC */
269 dma_cap->rmon = (hw_cap & GMAC_HW_FEAT_MMCSEL) >> 8;
270 /* IEEE 1588-2008 */
271 dma_cap->atime_stamp = (hw_cap & GMAC_HW_FEAT_TSSEL) >> 12;
272 /* 802.3az - Energy-Efficient Ethernet (EEE) */
273 dma_cap->eee = (hw_cap & GMAC_HW_FEAT_EEESEL) >> 13;
274 /* TX and RX csum */
275 dma_cap->tx_coe = (hw_cap & GMAC_HW_FEAT_TXCOSEL) >> 14;
276 dma_cap->rx_coe = (hw_cap & GMAC_HW_FEAT_RXCOESEL) >> 16;
277
278 /* MAC HW feature1 */
279 hw_cap = readl(ioaddr + GMAC_HW_FEATURE1);
280 dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20;
281 dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18;
282 /* MAC HW feature2 */
283 hw_cap = readl(ioaddr + GMAC_HW_FEATURE2);
284 /* TX and RX number of channels */
285 dma_cap->number_rx_channel =
286 ((hw_cap & GMAC_HW_FEAT_RXCHCNT) >> 12) + 1;
287 dma_cap->number_tx_channel =
288 ((hw_cap & GMAC_HW_FEAT_TXCHCNT) >> 18) + 1;
289
290 /* IEEE 1588-2002 */
291 dma_cap->time_stamp = 0;
292}
293
294/* Enable/disable TSO feature and set MSS */
295static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
296{
297 u32 value;
298
299 if (en) {
300 /* enable TSO */
301 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
302 writel(value | DMA_CONTROL_TSE,
303 ioaddr + DMA_CHAN_TX_CONTROL(chan));
304 } else {
305		/* disable TSO */
306 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
307 writel(value & ~DMA_CONTROL_TSE,
308 ioaddr + DMA_CHAN_TX_CONTROL(chan));
309 }
310}
311
312const struct stmmac_dma_ops dwmac4_dma_ops = {
313 .reset = dwmac4_dma_reset,
314 .init = dwmac4_dma_init,
315 .axi = dwmac4_dma_axi,
316 .dump_regs = dwmac4_dump_dma_regs,
317 .dma_mode = dwmac4_dma_operation_mode,
318 .enable_dma_irq = dwmac4_enable_dma_irq,
319 .disable_dma_irq = dwmac4_disable_dma_irq,
320 .start_tx = dwmac4_dma_start_tx,
321 .stop_tx = dwmac4_dma_stop_tx,
322 .start_rx = dwmac4_dma_start_rx,
323 .stop_rx = dwmac4_dma_stop_rx,
324 .dma_interrupt = dwmac4_dma_interrupt,
325 .get_hw_feature = dwmac4_get_hw_feature,
326 .rx_watchdog = dwmac4_rx_watchdog,
327 .set_rx_ring_len = dwmac4_set_rx_ring_len,
328 .set_tx_ring_len = dwmac4_set_tx_ring_len,
329 .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
330 .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
331 .enable_tso = dwmac4_enable_tso,
332};
333
334const struct stmmac_dma_ops dwmac410_dma_ops = {
335 .reset = dwmac4_dma_reset,
336 .init = dwmac4_dma_init,
337 .axi = dwmac4_dma_axi,
338 .dump_regs = dwmac4_dump_dma_regs,
339 .dma_mode = dwmac4_dma_operation_mode,
340 .enable_dma_irq = dwmac410_enable_dma_irq,
341 .disable_dma_irq = dwmac4_disable_dma_irq,
342 .start_tx = dwmac4_dma_start_tx,
343 .stop_tx = dwmac4_dma_stop_tx,
344 .start_rx = dwmac4_dma_start_rx,
345 .stop_rx = dwmac4_dma_stop_rx,
346 .dma_interrupt = dwmac4_dma_interrupt,
347 .get_hw_feature = dwmac4_get_hw_feature,
348 .rx_watchdog = dwmac4_rx_watchdog,
349 .set_rx_ring_len = dwmac4_set_rx_ring_len,
350 .set_tx_ring_len = dwmac4_set_tx_ring_len,
351 .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
352 .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
353 .enable_tso = dwmac4_enable_tso,
354};
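
The channel-count fields read by dwmac4_get_hw_feature() encode "count minus one", hence the "+ 1" in the code above. A small stand-alone sketch decoding a hypothetical GMAC_HW_FEATURE2 value (GENMASK() is a local stand-in):

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)	(((~0u) >> (31 - (h))) & ~((1u << (l)) - 1))
#define GMAC_HW_FEAT_TXCHCNT	GENMASK(21, 18)
#define GMAC_HW_FEAT_RXCHCNT	GENMASK(15, 12)

int main(void)
{
	/* Hypothetical HW_FEATURE2 value: both fields hold 3, i.e. 4 channels. */
	uint32_t hw_cap = (3u << 18) | (3u << 12);

	unsigned int tx = ((hw_cap & GMAC_HW_FEAT_TXCHCNT) >> 18) + 1;
	unsigned int rx = ((hw_cap & GMAC_HW_FEAT_RXCHCNT) >> 12) + 1;

	printf("%u TX channels, %u RX channels\n", tx, rx);
	return 0;
}
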
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
new file mode 100644
index 000000000000..1b06df749e2b
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
@@ -0,0 +1,202 @@
1/*
2 * DWMAC4 DMA Header file.
3 *
4 *
5 * Copyright (C) 2007-2015 STMicroelectronics Ltd
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * Author: Alexandre Torgue <alexandre.torgue@st.com>
12 */
13
14#ifndef __DWMAC4_DMA_H__
15#define __DWMAC4_DMA_H__
16
17/* Define the max channel number used for tx (also rx).
18 * dwmac4 accepts up to 8 channels for TX (and also 8 channels for RX).
19 */
20#define DMA_CHANNEL_NB_MAX 1
21
22#define DMA_BUS_MODE 0x00001000
23#define DMA_SYS_BUS_MODE 0x00001004
24#define DMA_STATUS 0x00001008
25#define DMA_DEBUG_STATUS_0 0x0000100c
26#define DMA_DEBUG_STATUS_1 0x00001010
27#define DMA_DEBUG_STATUS_2 0x00001014
28#define DMA_AXI_BUS_MODE 0x00001028
29
30/* DMA Bus Mode bitmap */
31#define DMA_BUS_MODE_SFT_RESET BIT(0)
32
33/* DMA SYS Bus Mode bitmap */
34#define DMA_BUS_MODE_SPH BIT(24)
35#define DMA_BUS_MODE_PBL BIT(16)
36#define DMA_BUS_MODE_PBL_SHIFT 16
37#define DMA_BUS_MODE_RPBL_SHIFT 16
38#define DMA_BUS_MODE_MB BIT(14)
39#define DMA_BUS_MODE_FB BIT(0)
40
41/* DMA Interrupt top status */
42#define DMA_STATUS_MAC BIT(17)
43#define DMA_STATUS_MTL BIT(16)
44#define DMA_STATUS_CHAN7 BIT(7)
45#define DMA_STATUS_CHAN6 BIT(6)
46#define DMA_STATUS_CHAN5 BIT(5)
47#define DMA_STATUS_CHAN4 BIT(4)
48#define DMA_STATUS_CHAN3 BIT(3)
49#define DMA_STATUS_CHAN2 BIT(2)
50#define DMA_STATUS_CHAN1 BIT(1)
51#define DMA_STATUS_CHAN0 BIT(0)
52
53/* DMA debug status bitmap */
54#define DMA_DEBUG_STATUS_TS_MASK 0xf
55#define DMA_DEBUG_STATUS_RS_MASK 0xf
56
57/* DMA AXI bitmap */
58#define DMA_AXI_EN_LPI BIT(31)
59#define DMA_AXI_LPI_XIT_FRM BIT(30)
60#define DMA_AXI_WR_OSR_LMT GENMASK(27, 24)
61#define DMA_AXI_WR_OSR_LMT_SHIFT 24
62#define DMA_AXI_RD_OSR_LMT GENMASK(19, 16)
63#define DMA_AXI_RD_OSR_LMT_SHIFT 16
64
65#define DMA_AXI_OSR_MAX 0xf
66#define DMA_AXI_MAX_OSR_LIMIT ((DMA_AXI_OSR_MAX << DMA_AXI_WR_OSR_LMT_SHIFT) | \
67 (DMA_AXI_OSR_MAX << DMA_AXI_RD_OSR_LMT_SHIFT))
68
69#define DMA_SYS_BUS_MB BIT(14)
70#define DMA_AXI_1KBBE BIT(13)
71#define DMA_SYS_BUS_AAL BIT(12)
72#define DMA_AXI_BLEN256 BIT(7)
73#define DMA_AXI_BLEN128 BIT(6)
74#define DMA_AXI_BLEN64 BIT(5)
75#define DMA_AXI_BLEN32 BIT(4)
76#define DMA_AXI_BLEN16 BIT(3)
77#define DMA_AXI_BLEN8 BIT(2)
78#define DMA_AXI_BLEN4 BIT(1)
79#define DMA_SYS_BUS_FB BIT(0)
80
81#define DMA_BURST_LEN_DEFAULT (DMA_AXI_BLEN256 | DMA_AXI_BLEN128 | \
82 DMA_AXI_BLEN64 | DMA_AXI_BLEN32 | \
83 DMA_AXI_BLEN16 | DMA_AXI_BLEN8 | \
84 DMA_AXI_BLEN4)
85
86#define DMA_AXI_BURST_LEN_MASK 0x000000FE
87
88/* The following DMA defines are channel oriented */
89#define DMA_CHAN_BASE_ADDR 0x00001100
90#define DMA_CHAN_BASE_OFFSET 0x80
91#define DMA_CHANX_BASE_ADDR(x) (DMA_CHAN_BASE_ADDR + \
92 (x * DMA_CHAN_BASE_OFFSET))
93#define DMA_CHAN_REG_NUMBER 17
94
95#define DMA_CHAN_CONTROL(x) DMA_CHANX_BASE_ADDR(x)
96#define DMA_CHAN_TX_CONTROL(x) (DMA_CHANX_BASE_ADDR(x) + 0x4)
97#define DMA_CHAN_RX_CONTROL(x) (DMA_CHANX_BASE_ADDR(x) + 0x8)
98#define DMA_CHAN_TX_BASE_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x14)
99#define DMA_CHAN_RX_BASE_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x1c)
100#define DMA_CHAN_TX_END_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x20)
101#define DMA_CHAN_RX_END_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x28)
102#define DMA_CHAN_TX_RING_LEN(x) (DMA_CHANX_BASE_ADDR(x) + 0x2c)
103#define DMA_CHAN_RX_RING_LEN(x) (DMA_CHANX_BASE_ADDR(x) + 0x30)
104#define DMA_CHAN_INTR_ENA(x) (DMA_CHANX_BASE_ADDR(x) + 0x34)
105#define DMA_CHAN_RX_WATCHDOG(x) (DMA_CHANX_BASE_ADDR(x) + 0x38)
106#define DMA_CHAN_SLOT_CTRL_STATUS(x) (DMA_CHANX_BASE_ADDR(x) + 0x3c)
107#define DMA_CHAN_CUR_TX_DESC(x) (DMA_CHANX_BASE_ADDR(x) + 0x44)
108#define DMA_CHAN_CUR_RX_DESC(x) (DMA_CHANX_BASE_ADDR(x) + 0x4c)
109#define DMA_CHAN_CUR_TX_BUF_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x54)
110#define DMA_CHAN_CUR_RX_BUF_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x5c)
111#define DMA_CHAN_STATUS(x) (DMA_CHANX_BASE_ADDR(x) + 0x60)
112
113/* DMA Control X */
114#define DMA_CONTROL_MSS_MASK GENMASK(13, 0)
115
116/* DMA Tx Channel X Control register defines */
117#define DMA_CONTROL_TSE BIT(12)
118#define DMA_CONTROL_OSP BIT(4)
119#define DMA_CONTROL_ST BIT(0)
120
121/* DMA Rx Channel X Control register defines */
122#define DMA_CONTROL_SR BIT(0)
123
124/* Interrupt status per channel */
125#define DMA_CHAN_STATUS_REB GENMASK(21, 19)
126#define DMA_CHAN_STATUS_REB_SHIFT 19
127#define DMA_CHAN_STATUS_TEB GENMASK(18, 16)
128#define DMA_CHAN_STATUS_TEB_SHIFT 16
129#define DMA_CHAN_STATUS_NIS BIT(15)
130#define DMA_CHAN_STATUS_AIS BIT(14)
131#define DMA_CHAN_STATUS_CDE BIT(13)
132#define DMA_CHAN_STATUS_FBE BIT(12)
133#define DMA_CHAN_STATUS_ERI BIT(11)
134#define DMA_CHAN_STATUS_ETI BIT(10)
135#define DMA_CHAN_STATUS_RWT BIT(9)
136#define DMA_CHAN_STATUS_RPS BIT(8)
137#define DMA_CHAN_STATUS_RBU BIT(7)
138#define DMA_CHAN_STATUS_RI BIT(6)
139#define DMA_CHAN_STATUS_TBU BIT(2)
140#define DMA_CHAN_STATUS_TPS BIT(1)
141#define DMA_CHAN_STATUS_TI BIT(0)
142
143/* Interrupt enable bits per channel */
144#define DMA_CHAN_INTR_ENA_NIE BIT(16)
145#define DMA_CHAN_INTR_ENA_AIE BIT(15)
146#define DMA_CHAN_INTR_ENA_NIE_4_10 BIT(15)
147#define DMA_CHAN_INTR_ENA_AIE_4_10 BIT(14)
148#define DMA_CHAN_INTR_ENA_CDE BIT(13)
149#define DMA_CHAN_INTR_ENA_FBE BIT(12)
150#define DMA_CHAN_INTR_ENA_ERE BIT(11)
151#define DMA_CHAN_INTR_ENA_ETE BIT(10)
152#define DMA_CHAN_INTR_ENA_RWE BIT(9)
153#define DMA_CHAN_INTR_ENA_RSE BIT(8)
154#define DMA_CHAN_INTR_ENA_RBUE BIT(7)
155#define DMA_CHAN_INTR_ENA_RIE BIT(6)
156#define DMA_CHAN_INTR_ENA_TBUE BIT(2)
157#define DMA_CHAN_INTR_ENA_TSE BIT(1)
158#define DMA_CHAN_INTR_ENA_TIE BIT(0)
159
160#define DMA_CHAN_INTR_NORMAL (DMA_CHAN_INTR_ENA_NIE | \
161 DMA_CHAN_INTR_ENA_RIE | \
162 DMA_CHAN_INTR_ENA_TIE)
163
164#define DMA_CHAN_INTR_ABNORMAL (DMA_CHAN_INTR_ENA_AIE | \
165 DMA_CHAN_INTR_ENA_FBE)
166/* DMA default interrupt mask for 4.00 */
167#define DMA_CHAN_INTR_DEFAULT_MASK (DMA_CHAN_INTR_NORMAL | \
168 DMA_CHAN_INTR_ABNORMAL)
169
170#define DMA_CHAN_INTR_NORMAL_4_10 (DMA_CHAN_INTR_ENA_NIE_4_10 | \
171 DMA_CHAN_INTR_ENA_RIE | \
172 DMA_CHAN_INTR_ENA_TIE)
173
174#define DMA_CHAN_INTR_ABNORMAL_4_10 (DMA_CHAN_INTR_ENA_AIE_4_10 | \
175 DMA_CHAN_INTR_ENA_FBE)
176/* DMA default interrupt mask for 4.10a */
177#define DMA_CHAN_INTR_DEFAULT_MASK_4_10 (DMA_CHAN_INTR_NORMAL_4_10 | \
178 DMA_CHAN_INTR_ABNORMAL_4_10)
179
180/* channel 0 specific fields */
181#define DMA_CHAN0_DBG_STAT_TPS GENMASK(15, 12)
182#define DMA_CHAN0_DBG_STAT_TPS_SHIFT 12
183#define DMA_CHAN0_DBG_STAT_RPS GENMASK(11, 8)
184#define DMA_CHAN0_DBG_STAT_RPS_SHIFT 8
185
186int dwmac4_dma_reset(void __iomem *ioaddr);
187void dwmac4_enable_dma_transmission(void __iomem *ioaddr, u32 tail_ptr);
188void dwmac4_enable_dma_irq(void __iomem *ioaddr);
189void dwmac410_enable_dma_irq(void __iomem *ioaddr);
190void dwmac4_disable_dma_irq(void __iomem *ioaddr);
191void dwmac4_dma_start_tx(void __iomem *ioaddr);
192void dwmac4_dma_stop_tx(void __iomem *ioaddr);
193void dwmac4_dma_start_rx(void __iomem *ioaddr);
194void dwmac4_dma_stop_rx(void __iomem *ioaddr);
195int dwmac4_dma_interrupt(void __iomem *ioaddr,
196 struct stmmac_extra_stats *x);
197void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len);
198void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len);
199void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
200void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
201
202#endif /* __DWMAC4_DMA_H__ */
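
The 4.00 and 4.10a default interrupt masks above differ only in where the NIE/AIE bits sit (bits 16/15 versus 15/14). A stand-alone sketch that recomputes both masks from values copied out of this header (illustrative only):

#include <stdio.h>

#define BIT(n)	(1u << (n))

/* Values copied from the header above. */
#define DMA_CHAN_INTR_ENA_NIE		BIT(16)
#define DMA_CHAN_INTR_ENA_AIE		BIT(15)
#define DMA_CHAN_INTR_ENA_NIE_4_10	BIT(15)
#define DMA_CHAN_INTR_ENA_AIE_4_10	BIT(14)
#define DMA_CHAN_INTR_ENA_FBE		BIT(12)
#define DMA_CHAN_INTR_ENA_RIE		BIT(6)
#define DMA_CHAN_INTR_ENA_TIE		BIT(0)

int main(void)
{
	unsigned int mask_400 = DMA_CHAN_INTR_ENA_NIE | DMA_CHAN_INTR_ENA_AIE |
				DMA_CHAN_INTR_ENA_FBE | DMA_CHAN_INTR_ENA_RIE |
				DMA_CHAN_INTR_ENA_TIE;
	unsigned int mask_410 = DMA_CHAN_INTR_ENA_NIE_4_10 |
				DMA_CHAN_INTR_ENA_AIE_4_10 |
				DMA_CHAN_INTR_ENA_FBE | DMA_CHAN_INTR_ENA_RIE |
				DMA_CHAN_INTR_ENA_TIE;

	/* Prints 0x19041 for 4.00 and 0xd041 for 4.10a. */
	printf("4.00 mask:  0x%x\n", mask_400);
	printf("4.10a mask: 0x%x\n", mask_410);
	return 0;
}
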
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
new file mode 100644
index 000000000000..c7326d5b2f43
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -0,0 +1,225 @@
1/*
2 * Copyright (C) 2007-2015 STMicroelectronics Ltd
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * Author: Alexandre Torgue <alexandre.torgue@st.com>
9 */
10
11#include <linux/io.h>
12#include <linux/delay.h>
13#include "common.h"
14#include "dwmac4_dma.h"
15#include "dwmac4.h"
16
17int dwmac4_dma_reset(void __iomem *ioaddr)
18{
19 u32 value = readl(ioaddr + DMA_BUS_MODE);
20 int limit;
21
22 /* DMA SW reset */
23 value |= DMA_BUS_MODE_SFT_RESET;
24 writel(value, ioaddr + DMA_BUS_MODE);
25 limit = 10;
26 while (limit--) {
27 if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
28 break;
29 mdelay(10);
30 }
31
32 if (limit < 0)
33 return -EBUSY;
34
35 return 0;
36}
37
38void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
39{
40 writel(tail_ptr, ioaddr + DMA_CHAN_RX_END_ADDR(0));
41}
42
43void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
44{
45 writel(tail_ptr, ioaddr + DMA_CHAN_TX_END_ADDR(0));
46}
47
48void dwmac4_dma_start_tx(void __iomem *ioaddr)
49{
50 u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
51
52 value |= DMA_CONTROL_ST;
53 writel(value, ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
54
55 value = readl(ioaddr + GMAC_CONFIG);
56 value |= GMAC_CONFIG_TE;
57 writel(value, ioaddr + GMAC_CONFIG);
58}
59
60void dwmac4_dma_stop_tx(void __iomem *ioaddr)
61{
62 u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
63
64 value &= ~DMA_CONTROL_ST;
65 writel(value, ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
66
67 value = readl(ioaddr + GMAC_CONFIG);
68 value &= ~GMAC_CONFIG_TE;
69 writel(value, ioaddr + GMAC_CONFIG);
70}
71
72void dwmac4_dma_start_rx(void __iomem *ioaddr)
73{
74 u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
75
76 value |= DMA_CONTROL_SR;
77
78 writel(value, ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
79
80 value = readl(ioaddr + GMAC_CONFIG);
81 value |= GMAC_CONFIG_RE;
82 writel(value, ioaddr + GMAC_CONFIG);
83}
84
85void dwmac4_dma_stop_rx(void __iomem *ioaddr)
86{
87 u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
88
89 value &= ~DMA_CONTROL_SR;
90 writel(value, ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
91
92 value = readl(ioaddr + GMAC_CONFIG);
93 value &= ~GMAC_CONFIG_RE;
94 writel(value, ioaddr + GMAC_CONFIG);
95}
96
97void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len)
98{
99 writel(len, ioaddr + DMA_CHAN_TX_RING_LEN(STMMAC_CHAN0));
100}
101
102void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len)
103{
104 writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(STMMAC_CHAN0));
105}
106
107void dwmac4_enable_dma_irq(void __iomem *ioaddr)
108{
109 writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr +
110 DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
111}
112
113void dwmac410_enable_dma_irq(void __iomem *ioaddr)
114{
115 writel(DMA_CHAN_INTR_DEFAULT_MASK_4_10,
116 ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
117}
118
119void dwmac4_disable_dma_irq(void __iomem *ioaddr)
120{
121 writel(0, ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
122}
123
124int dwmac4_dma_interrupt(void __iomem *ioaddr,
125 struct stmmac_extra_stats *x)
126{
127 int ret = 0;
128
129 u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(0));
130
131 /* ABNORMAL interrupts */
132 if (unlikely(intr_status & DMA_CHAN_STATUS_AIS)) {
133 if (unlikely(intr_status & DMA_CHAN_STATUS_RBU))
134 x->rx_buf_unav_irq++;
135 if (unlikely(intr_status & DMA_CHAN_STATUS_RPS))
136 x->rx_process_stopped_irq++;
137 if (unlikely(intr_status & DMA_CHAN_STATUS_RWT))
138 x->rx_watchdog_irq++;
139 if (unlikely(intr_status & DMA_CHAN_STATUS_ETI))
140 x->tx_early_irq++;
141 if (unlikely(intr_status & DMA_CHAN_STATUS_TPS)) {
142 x->tx_process_stopped_irq++;
143 ret = tx_hard_error;
144 }
145 if (unlikely(intr_status & DMA_CHAN_STATUS_FBE)) {
146 x->fatal_bus_error_irq++;
147 ret = tx_hard_error;
148 }
149 }
150 /* TX/RX NORMAL interrupts */
151 if (likely(intr_status & DMA_CHAN_STATUS_NIS)) {
152 x->normal_irq_n++;
153 if (likely(intr_status & DMA_CHAN_STATUS_RI)) {
154 u32 value;
155
156 value = readl(ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
157 /* to schedule NAPI on real RIE event. */
158 if (likely(value & DMA_CHAN_INTR_ENA_RIE)) {
159 x->rx_normal_irq_n++;
160 ret |= handle_rx;
161 }
162 }
163 if (likely(intr_status & DMA_CHAN_STATUS_TI)) {
164 x->tx_normal_irq_n++;
165 ret |= handle_tx;
166 }
167 if (unlikely(intr_status & DMA_CHAN_STATUS_ERI))
168 x->rx_early_irq++;
169 }
170
171 /* Clear the interrupt by writing a logic 1 to the chanX interrupt
172	 * status [21-0] except reserved bits [5-3]
173 */
174 writel((intr_status & 0x3fffc7),
175 ioaddr + DMA_CHAN_STATUS(STMMAC_CHAN0));
176
177 return ret;
178}
179
180void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
181 unsigned int high, unsigned int low)
182{
183 unsigned long data;
184
185 data = (addr[5] << 8) | addr[4];
186	/* For MAC Addr registers we have to set the Address Enable (AE)
187 * bit that has no effect on the High Reg 0 where the bit 31 (MO)
188 * is RO.
189 */
190 data |= (STMMAC_CHAN0 << GMAC_HI_DCS_SHIFT);
191 writel(data | GMAC_HI_REG_AE, ioaddr + high);
192 data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
193 writel(data, ioaddr + low);
194}
195
196/* Enable/disable MAC RX/TX */
197void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable)
198{
199 u32 value = readl(ioaddr + GMAC_CONFIG);
200
201 if (enable)
202 value |= GMAC_CONFIG_RE | GMAC_CONFIG_TE;
203 else
204 value &= ~(GMAC_CONFIG_TE | GMAC_CONFIG_RE);
205
206 writel(value, ioaddr + GMAC_CONFIG);
207}
208
209void stmmac_dwmac4_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
210 unsigned int high, unsigned int low)
211{
212 unsigned int hi_addr, lo_addr;
213
214 /* Read the MAC address from the hardware */
215 hi_addr = readl(ioaddr + high);
216 lo_addr = readl(ioaddr + low);
217
218 /* Extract the MAC address from the high and low words */
219 addr[0] = lo_addr & 0xff;
220 addr[1] = (lo_addr >> 8) & 0xff;
221 addr[2] = (lo_addr >> 16) & 0xff;
222 addr[3] = (lo_addr >> 24) & 0xff;
223 addr[4] = hi_addr & 0xff;
224 addr[5] = (hi_addr >> 8) & 0xff;
225}
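
The MAC address helpers above split the address across a low word (bytes 0-3) and a high word (bytes 4-5 plus the DCS field and the AE bit). A stand-alone round-trip sketch, with the register writes and reads replaced by plain variables (illustrative only):

#include <stdio.h>
#include <stdint.h>

#define GMAC_HI_REG_AE		(1u << 31)
#define GMAC_HI_DCS_SHIFT	16

int main(void)
{
	uint8_t addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint8_t out[6];
	unsigned int chan = 0;

	/* Pack, as stmmac_dwmac4_set_mac_addr() does. */
	uint32_t hi = ((addr[5] << 8) | addr[4]) |
		      (chan << GMAC_HI_DCS_SHIFT) | GMAC_HI_REG_AE;
	uint32_t lo = (addr[3] << 24) | (addr[2] << 16) |
		      (addr[1] << 8) | addr[0];

	/* Unpack, as stmmac_dwmac4_get_mac_addr() does. */
	out[0] = lo & 0xff;
	out[1] = (lo >> 8) & 0xff;
	out[2] = (lo >> 16) & 0xff;
	out[3] = (lo >> 24) & 0xff;
	out[4] = hi & 0xff;
	out[5] = (hi >> 8) & 0xff;

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       out[0], out[1], out[2], out[3], out[4], out[5]);
	return 0;
}
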
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index cfb018c7c5eb..38f19c99cf59 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -411,6 +411,26 @@ static int enh_desc_get_rx_timestamp_status(void *desc, u32 ats)
411 } 411 }
412} 412}
413 413
414static void enh_desc_display_ring(void *head, unsigned int size, bool rx)
415{
416 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
417 int i;
418
419 pr_info("Extended %s descriptor ring:\n", rx ? "RX" : "TX");
420
421 for (i = 0; i < size; i++) {
422 u64 x;
423
424 x = *(u64 *)ep;
425 pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
426 i, (unsigned int)virt_to_phys(ep),
427 (unsigned int)x, (unsigned int)(x >> 32),
428 ep->basic.des2, ep->basic.des3);
429 ep++;
430 }
431 pr_info("\n");
432}
433
414const struct stmmac_desc_ops enh_desc_ops = { 434const struct stmmac_desc_ops enh_desc_ops = {
415 .tx_status = enh_desc_get_tx_status, 435 .tx_status = enh_desc_get_tx_status,
416 .rx_status = enh_desc_get_rx_status, 436 .rx_status = enh_desc_get_rx_status,
@@ -430,4 +450,5 @@ const struct stmmac_desc_ops enh_desc_ops = {
430 .get_tx_timestamp_status = enh_desc_get_tx_timestamp_status, 450 .get_tx_timestamp_status = enh_desc_get_tx_timestamp_status,
431 .get_timestamp = enh_desc_get_timestamp, 451 .get_timestamp = enh_desc_get_timestamp,
432 .get_rx_timestamp_status = enh_desc_get_rx_timestamp_status, 452 .get_rx_timestamp_status = enh_desc_get_rx_timestamp_status,
453 .display_ring = enh_desc_display_ring,
433}; 454};
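
The new enh_desc_display_ring() walks the extended descriptor array and prints, per entry, the index, physical address and the four 32-bit descriptor words, reading the first two words through one 64-bit load. A hedged user-space sketch of that word splitting; the descriptor layout is a simplified stand-in (not the real struct dma_extended_desc) and the split assumes a little-endian host, as the cast in the driver effectively does:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* simplified stand-in for the first four words of a descriptor */
struct fake_desc {
	uint32_t des0, des1, des2, des3;
};

static void display_ring(const struct fake_desc *ring, unsigned int size)
{
	for (unsigned int i = 0; i < size; i++) {
		uint64_t x;

		/* read des0/des1 as one 64-bit value, then split it */
		memcpy(&x, &ring[i], sizeof(x));
		printf("%u: 0x%x 0x%x 0x%x 0x%x\n",
		       i, (unsigned int)x, (unsigned int)(x >> 32),
		       (unsigned int)ring[i].des2, (unsigned int)ring[i].des3);
	}
}

int main(void)
{
	struct fake_desc ring[2] = {
		{ 0x11111111, 0x22222222, 0x33333333, 0x44444444 },
		{ 0xaaaaaaaa, 0xbbbbbbbb, 0xcccccccc, 0xdddddddd },
	};

	display_ring(ring, 2);
	return 0;
}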
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index 192c2491330b..38a1a5603293 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -35,6 +35,10 @@
35 * current value.*/ 35 * current value.*/
36#define MMC_CNTRL_PRESET 0x10 36#define MMC_CNTRL_PRESET 0x10
37#define MMC_CNTRL_FULL_HALF_PRESET 0x20 37#define MMC_CNTRL_FULL_HALF_PRESET 0x20
38
39#define MMC_GMAC4_OFFSET 0x700
40#define MMC_GMAC3_X_OFFSET 0x100
41
38struct stmmac_counters { 42struct stmmac_counters {
39 unsigned int mmc_tx_octetcount_gb; 43 unsigned int mmc_tx_octetcount_gb;
40 unsigned int mmc_tx_framecount_gb; 44 unsigned int mmc_tx_framecount_gb;
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index 3f20bb1fe570..ce9aa792857b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -28,12 +28,12 @@
28 28
29/* MAC Management Counters register offset */ 29/* MAC Management Counters register offset */
30 30
31#define MMC_CNTRL 0x00000100 /* MMC Control */ 31#define MMC_CNTRL 0x00 /* MMC Control */
32#define MMC_RX_INTR 0x00000104 /* MMC RX Interrupt */ 32#define MMC_RX_INTR 0x04 /* MMC RX Interrupt */
33#define MMC_TX_INTR 0x00000108 /* MMC TX Interrupt */ 33#define MMC_TX_INTR 0x08 /* MMC TX Interrupt */
34#define MMC_RX_INTR_MASK 0x0000010c /* MMC Interrupt Mask */ 34#define MMC_RX_INTR_MASK 0x0c /* MMC Interrupt Mask */
35#define MMC_TX_INTR_MASK 0x00000110 /* MMC Interrupt Mask */ 35#define MMC_TX_INTR_MASK 0x10 /* MMC Interrupt Mask */
36#define MMC_DEFAULT_MASK 0xffffffff 36#define MMC_DEFAULT_MASK 0xffffffff
37 37
38/* MMC TX counter registers */ 38/* MMC TX counter registers */
39 39
@@ -41,115 +41,115 @@
41 * _GB register stands for good and bad frames 41 * _GB register stands for good and bad frames
42 * _G is for good only. 42 * _G is for good only.
43 */ 43 */
44#define MMC_TX_OCTETCOUNT_GB 0x00000114 44#define MMC_TX_OCTETCOUNT_GB 0x14
45#define MMC_TX_FRAMECOUNT_GB 0x00000118 45#define MMC_TX_FRAMECOUNT_GB 0x18
46#define MMC_TX_BROADCASTFRAME_G 0x0000011c 46#define MMC_TX_BROADCASTFRAME_G 0x1c
47#define MMC_TX_MULTICASTFRAME_G 0x00000120 47#define MMC_TX_MULTICASTFRAME_G 0x20
48#define MMC_TX_64_OCTETS_GB 0x00000124 48#define MMC_TX_64_OCTETS_GB 0x24
49#define MMC_TX_65_TO_127_OCTETS_GB 0x00000128 49#define MMC_TX_65_TO_127_OCTETS_GB 0x28
50#define MMC_TX_128_TO_255_OCTETS_GB 0x0000012c 50#define MMC_TX_128_TO_255_OCTETS_GB 0x2c
51#define MMC_TX_256_TO_511_OCTETS_GB 0x00000130 51#define MMC_TX_256_TO_511_OCTETS_GB 0x30
52#define MMC_TX_512_TO_1023_OCTETS_GB 0x00000134 52#define MMC_TX_512_TO_1023_OCTETS_GB 0x34
53#define MMC_TX_1024_TO_MAX_OCTETS_GB 0x00000138 53#define MMC_TX_1024_TO_MAX_OCTETS_GB 0x38
54#define MMC_TX_UNICAST_GB 0x0000013c 54#define MMC_TX_UNICAST_GB 0x3c
55#define MMC_TX_MULTICAST_GB 0x00000140 55#define MMC_TX_MULTICAST_GB 0x40
56#define MMC_TX_BROADCAST_GB 0x00000144 56#define MMC_TX_BROADCAST_GB 0x44
57#define MMC_TX_UNDERFLOW_ERROR 0x00000148 57#define MMC_TX_UNDERFLOW_ERROR 0x48
58#define MMC_TX_SINGLECOL_G 0x0000014c 58#define MMC_TX_SINGLECOL_G 0x4c
59#define MMC_TX_MULTICOL_G 0x00000150 59#define MMC_TX_MULTICOL_G 0x50
60#define MMC_TX_DEFERRED 0x00000154 60#define MMC_TX_DEFERRED 0x54
61#define MMC_TX_LATECOL 0x00000158 61#define MMC_TX_LATECOL 0x58
62#define MMC_TX_EXESSCOL 0x0000015c 62#define MMC_TX_EXESSCOL 0x5c
63#define MMC_TX_CARRIER_ERROR 0x00000160 63#define MMC_TX_CARRIER_ERROR 0x60
64#define MMC_TX_OCTETCOUNT_G 0x00000164 64#define MMC_TX_OCTETCOUNT_G 0x64
65#define MMC_TX_FRAMECOUNT_G 0x00000168 65#define MMC_TX_FRAMECOUNT_G 0x68
66#define MMC_TX_EXCESSDEF 0x0000016c 66#define MMC_TX_EXCESSDEF 0x6c
67#define MMC_TX_PAUSE_FRAME 0x00000170 67#define MMC_TX_PAUSE_FRAME 0x70
68#define MMC_TX_VLAN_FRAME_G 0x00000174 68#define MMC_TX_VLAN_FRAME_G 0x74
69 69
70/* MMC RX counter registers */ 70/* MMC RX counter registers */
71#define MMC_RX_FRAMECOUNT_GB 0x00000180 71#define MMC_RX_FRAMECOUNT_GB 0x80
72#define MMC_RX_OCTETCOUNT_GB 0x00000184 72#define MMC_RX_OCTETCOUNT_GB 0x84
73#define MMC_RX_OCTETCOUNT_G 0x00000188 73#define MMC_RX_OCTETCOUNT_G 0x88
74#define MMC_RX_BROADCASTFRAME_G 0x0000018c 74#define MMC_RX_BROADCASTFRAME_G 0x8c
75#define MMC_RX_MULTICASTFRAME_G 0x00000190 75#define MMC_RX_MULTICASTFRAME_G 0x90
76#define MMC_RX_CRC_ERROR 0x00000194 76#define MMC_RX_CRC_ERROR 0x94
77#define MMC_RX_ALIGN_ERROR 0x00000198 77#define MMC_RX_ALIGN_ERROR 0x98
78#define MMC_RX_RUN_ERROR 0x0000019C 78#define MMC_RX_RUN_ERROR 0x9C
79#define MMC_RX_JABBER_ERROR 0x000001A0 79#define MMC_RX_JABBER_ERROR 0xA0
80#define MMC_RX_UNDERSIZE_G 0x000001A4 80#define MMC_RX_UNDERSIZE_G 0xA4
81#define MMC_RX_OVERSIZE_G 0x000001A8 81#define MMC_RX_OVERSIZE_G 0xA8
82#define MMC_RX_64_OCTETS_GB 0x000001AC 82#define MMC_RX_64_OCTETS_GB 0xAC
83#define MMC_RX_65_TO_127_OCTETS_GB 0x000001b0 83#define MMC_RX_65_TO_127_OCTETS_GB 0xb0
84#define MMC_RX_128_TO_255_OCTETS_GB 0x000001b4 84#define MMC_RX_128_TO_255_OCTETS_GB 0xb4
85#define MMC_RX_256_TO_511_OCTETS_GB 0x000001b8 85#define MMC_RX_256_TO_511_OCTETS_GB 0xb8
86#define MMC_RX_512_TO_1023_OCTETS_GB 0x000001bc 86#define MMC_RX_512_TO_1023_OCTETS_GB 0xbc
87#define MMC_RX_1024_TO_MAX_OCTETS_GB 0x000001c0 87#define MMC_RX_1024_TO_MAX_OCTETS_GB 0xc0
88#define MMC_RX_UNICAST_G 0x000001c4 88#define MMC_RX_UNICAST_G 0xc4
89#define MMC_RX_LENGTH_ERROR 0x000001c8 89#define MMC_RX_LENGTH_ERROR 0xc8
90#define MMC_RX_AUTOFRANGETYPE 0x000001cc 90#define MMC_RX_AUTOFRANGETYPE 0xcc
91#define MMC_RX_PAUSE_FRAMES 0x000001d0 91#define MMC_RX_PAUSE_FRAMES 0xd0
92#define MMC_RX_FIFO_OVERFLOW 0x000001d4 92#define MMC_RX_FIFO_OVERFLOW 0xd4
93#define MMC_RX_VLAN_FRAMES_GB 0x000001d8 93#define MMC_RX_VLAN_FRAMES_GB 0xd8
94#define MMC_RX_WATCHDOG_ERROR 0x000001dc 94#define MMC_RX_WATCHDOG_ERROR 0xdc
95/* IPC*/ 95/* IPC*/
96#define MMC_RX_IPC_INTR_MASK 0x00000200 96#define MMC_RX_IPC_INTR_MASK 0x100
97#define MMC_RX_IPC_INTR 0x00000208 97#define MMC_RX_IPC_INTR 0x108
98/* IPv4*/ 98/* IPv4*/
99#define MMC_RX_IPV4_GD 0x00000210 99#define MMC_RX_IPV4_GD 0x110
100#define MMC_RX_IPV4_HDERR 0x00000214 100#define MMC_RX_IPV4_HDERR 0x114
101#define MMC_RX_IPV4_NOPAY 0x00000218 101#define MMC_RX_IPV4_NOPAY 0x118
102#define MMC_RX_IPV4_FRAG 0x0000021C 102#define MMC_RX_IPV4_FRAG 0x11C
103#define MMC_RX_IPV4_UDSBL 0x00000220 103#define MMC_RX_IPV4_UDSBL 0x120
104 104
105#define MMC_RX_IPV4_GD_OCTETS 0x00000250 105#define MMC_RX_IPV4_GD_OCTETS 0x150
106#define MMC_RX_IPV4_HDERR_OCTETS 0x00000254 106#define MMC_RX_IPV4_HDERR_OCTETS 0x154
107#define MMC_RX_IPV4_NOPAY_OCTETS 0x00000258 107#define MMC_RX_IPV4_NOPAY_OCTETS 0x158
108#define MMC_RX_IPV4_FRAG_OCTETS 0x0000025c 108#define MMC_RX_IPV4_FRAG_OCTETS 0x15c
109#define MMC_RX_IPV4_UDSBL_OCTETS 0x00000260 109#define MMC_RX_IPV4_UDSBL_OCTETS 0x160
110 110
111/* IPV6*/ 111/* IPV6*/
112#define MMC_RX_IPV6_GD_OCTETS 0x00000264 112#define MMC_RX_IPV6_GD_OCTETS 0x164
113#define MMC_RX_IPV6_HDERR_OCTETS 0x00000268 113#define MMC_RX_IPV6_HDERR_OCTETS 0x168
114#define MMC_RX_IPV6_NOPAY_OCTETS 0x0000026c 114#define MMC_RX_IPV6_NOPAY_OCTETS 0x16c
115 115
116#define MMC_RX_IPV6_GD 0x00000224 116#define MMC_RX_IPV6_GD 0x124
117#define MMC_RX_IPV6_HDERR 0x00000228 117#define MMC_RX_IPV6_HDERR 0x128
118#define MMC_RX_IPV6_NOPAY 0x0000022c 118#define MMC_RX_IPV6_NOPAY 0x12c
119 119
120/* Protocols*/ 120/* Protocols*/
121#define MMC_RX_UDP_GD 0x00000230 121#define MMC_RX_UDP_GD 0x130
122#define MMC_RX_UDP_ERR 0x00000234 122#define MMC_RX_UDP_ERR 0x134
123#define MMC_RX_TCP_GD 0x00000238 123#define MMC_RX_TCP_GD 0x138
124#define MMC_RX_TCP_ERR 0x0000023c 124#define MMC_RX_TCP_ERR 0x13c
125#define MMC_RX_ICMP_GD 0x00000240 125#define MMC_RX_ICMP_GD 0x140
126#define MMC_RX_ICMP_ERR 0x00000244 126#define MMC_RX_ICMP_ERR 0x144
127 127
128#define MMC_RX_UDP_GD_OCTETS 0x00000270 128#define MMC_RX_UDP_GD_OCTETS 0x170
129#define MMC_RX_UDP_ERR_OCTETS 0x00000274 129#define MMC_RX_UDP_ERR_OCTETS 0x174
130#define MMC_RX_TCP_GD_OCTETS 0x00000278 130#define MMC_RX_TCP_GD_OCTETS 0x178
131#define MMC_RX_TCP_ERR_OCTETS 0x0000027c 131#define MMC_RX_TCP_ERR_OCTETS 0x17c
132#define MMC_RX_ICMP_GD_OCTETS 0x00000280 132#define MMC_RX_ICMP_GD_OCTETS 0x180
133#define MMC_RX_ICMP_ERR_OCTETS 0x00000284 133#define MMC_RX_ICMP_ERR_OCTETS 0x184
134 134
135void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode) 135void dwmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
136{ 136{
137 u32 value = readl(ioaddr + MMC_CNTRL); 137 u32 value = readl(mmcaddr + MMC_CNTRL);
138 138
139 value |= (mode & 0x3F); 139 value |= (mode & 0x3F);
140 140
141 writel(value, ioaddr + MMC_CNTRL); 141 writel(value, mmcaddr + MMC_CNTRL);
142 142
143 pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n", 143 pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
144 MMC_CNTRL, value); 144 MMC_CNTRL, value);
145} 145}
146 146
147/* To mask all interrupts.*/ 147/* To mask all interrupts.*/
148void dwmac_mmc_intr_all_mask(void __iomem *ioaddr) 148void dwmac_mmc_intr_all_mask(void __iomem *mmcaddr)
149{ 149{
150 writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK); 150 writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_INTR_MASK);
151 writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK); 151 writel(MMC_DEFAULT_MASK, mmcaddr + MMC_TX_INTR_MASK);
152 writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_IPC_INTR_MASK); 152 writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_IPC_INTR_MASK);
153} 153}
154 154
155/* This reads the MAC core counters (if actually supported). 155/* This reads the MAC core counters (if actually supported).
@@ -157,111 +157,116 @@ void dwmac_mmc_intr_all_mask(void __iomem *ioaddr)
157 * counter after a read. So all the field of the mmc struct 157 * counter after a read. So all the field of the mmc struct
158 * have to be incremented. 158 * have to be incremented.
159 */ 159 */
160void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc) 160void dwmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
161{ 161{
162 mmc->mmc_tx_octetcount_gb += readl(ioaddr + MMC_TX_OCTETCOUNT_GB); 162 mmc->mmc_tx_octetcount_gb += readl(mmcaddr + MMC_TX_OCTETCOUNT_GB);
163 mmc->mmc_tx_framecount_gb += readl(ioaddr + MMC_TX_FRAMECOUNT_GB); 163 mmc->mmc_tx_framecount_gb += readl(mmcaddr + MMC_TX_FRAMECOUNT_GB);
164 mmc->mmc_tx_broadcastframe_g += readl(ioaddr + MMC_TX_BROADCASTFRAME_G); 164 mmc->mmc_tx_broadcastframe_g += readl(mmcaddr +
165 mmc->mmc_tx_multicastframe_g += readl(ioaddr + MMC_TX_MULTICASTFRAME_G); 165 MMC_TX_BROADCASTFRAME_G);
166 mmc->mmc_tx_64_octets_gb += readl(ioaddr + MMC_TX_64_OCTETS_GB); 166 mmc->mmc_tx_multicastframe_g += readl(mmcaddr +
167 MMC_TX_MULTICASTFRAME_G);
168 mmc->mmc_tx_64_octets_gb += readl(mmcaddr + MMC_TX_64_OCTETS_GB);
167 mmc->mmc_tx_65_to_127_octets_gb += 169 mmc->mmc_tx_65_to_127_octets_gb +=
168 readl(ioaddr + MMC_TX_65_TO_127_OCTETS_GB); 170 readl(mmcaddr + MMC_TX_65_TO_127_OCTETS_GB);
169 mmc->mmc_tx_128_to_255_octets_gb += 171 mmc->mmc_tx_128_to_255_octets_gb +=
170 readl(ioaddr + MMC_TX_128_TO_255_OCTETS_GB); 172 readl(mmcaddr + MMC_TX_128_TO_255_OCTETS_GB);
171 mmc->mmc_tx_256_to_511_octets_gb += 173 mmc->mmc_tx_256_to_511_octets_gb +=
172 readl(ioaddr + MMC_TX_256_TO_511_OCTETS_GB); 174 readl(mmcaddr + MMC_TX_256_TO_511_OCTETS_GB);
173 mmc->mmc_tx_512_to_1023_octets_gb += 175 mmc->mmc_tx_512_to_1023_octets_gb +=
174 readl(ioaddr + MMC_TX_512_TO_1023_OCTETS_GB); 176 readl(mmcaddr + MMC_TX_512_TO_1023_OCTETS_GB);
175 mmc->mmc_tx_1024_to_max_octets_gb += 177 mmc->mmc_tx_1024_to_max_octets_gb +=
176 readl(ioaddr + MMC_TX_1024_TO_MAX_OCTETS_GB); 178 readl(mmcaddr + MMC_TX_1024_TO_MAX_OCTETS_GB);
177 mmc->mmc_tx_unicast_gb += readl(ioaddr + MMC_TX_UNICAST_GB); 179 mmc->mmc_tx_unicast_gb += readl(mmcaddr + MMC_TX_UNICAST_GB);
178 mmc->mmc_tx_multicast_gb += readl(ioaddr + MMC_TX_MULTICAST_GB); 180 mmc->mmc_tx_multicast_gb += readl(mmcaddr + MMC_TX_MULTICAST_GB);
179 mmc->mmc_tx_broadcast_gb += readl(ioaddr + MMC_TX_BROADCAST_GB); 181 mmc->mmc_tx_broadcast_gb += readl(mmcaddr + MMC_TX_BROADCAST_GB);
180 mmc->mmc_tx_underflow_error += readl(ioaddr + MMC_TX_UNDERFLOW_ERROR); 182 mmc->mmc_tx_underflow_error += readl(mmcaddr + MMC_TX_UNDERFLOW_ERROR);
181 mmc->mmc_tx_singlecol_g += readl(ioaddr + MMC_TX_SINGLECOL_G); 183 mmc->mmc_tx_singlecol_g += readl(mmcaddr + MMC_TX_SINGLECOL_G);
182 mmc->mmc_tx_multicol_g += readl(ioaddr + MMC_TX_MULTICOL_G); 184 mmc->mmc_tx_multicol_g += readl(mmcaddr + MMC_TX_MULTICOL_G);
183 mmc->mmc_tx_deferred += readl(ioaddr + MMC_TX_DEFERRED); 185 mmc->mmc_tx_deferred += readl(mmcaddr + MMC_TX_DEFERRED);
184 mmc->mmc_tx_latecol += readl(ioaddr + MMC_TX_LATECOL); 186 mmc->mmc_tx_latecol += readl(mmcaddr + MMC_TX_LATECOL);
185 mmc->mmc_tx_exesscol += readl(ioaddr + MMC_TX_EXESSCOL); 187 mmc->mmc_tx_exesscol += readl(mmcaddr + MMC_TX_EXESSCOL);
186 mmc->mmc_tx_carrier_error += readl(ioaddr + MMC_TX_CARRIER_ERROR); 188 mmc->mmc_tx_carrier_error += readl(mmcaddr + MMC_TX_CARRIER_ERROR);
187 mmc->mmc_tx_octetcount_g += readl(ioaddr + MMC_TX_OCTETCOUNT_G); 189 mmc->mmc_tx_octetcount_g += readl(mmcaddr + MMC_TX_OCTETCOUNT_G);
188 mmc->mmc_tx_framecount_g += readl(ioaddr + MMC_TX_FRAMECOUNT_G); 190 mmc->mmc_tx_framecount_g += readl(mmcaddr + MMC_TX_FRAMECOUNT_G);
189 mmc->mmc_tx_excessdef += readl(ioaddr + MMC_TX_EXCESSDEF); 191 mmc->mmc_tx_excessdef += readl(mmcaddr + MMC_TX_EXCESSDEF);
190 mmc->mmc_tx_pause_frame += readl(ioaddr + MMC_TX_PAUSE_FRAME); 192 mmc->mmc_tx_pause_frame += readl(mmcaddr + MMC_TX_PAUSE_FRAME);
191 mmc->mmc_tx_vlan_frame_g += readl(ioaddr + MMC_TX_VLAN_FRAME_G); 193 mmc->mmc_tx_vlan_frame_g += readl(mmcaddr + MMC_TX_VLAN_FRAME_G);
192 194
193 /* MMC RX counter registers */ 195 /* MMC RX counter registers */
194 mmc->mmc_rx_framecount_gb += readl(ioaddr + MMC_RX_FRAMECOUNT_GB); 196 mmc->mmc_rx_framecount_gb += readl(mmcaddr + MMC_RX_FRAMECOUNT_GB);
195 mmc->mmc_rx_octetcount_gb += readl(ioaddr + MMC_RX_OCTETCOUNT_GB); 197 mmc->mmc_rx_octetcount_gb += readl(mmcaddr + MMC_RX_OCTETCOUNT_GB);
196 mmc->mmc_rx_octetcount_g += readl(ioaddr + MMC_RX_OCTETCOUNT_G); 198 mmc->mmc_rx_octetcount_g += readl(mmcaddr + MMC_RX_OCTETCOUNT_G);
197 mmc->mmc_rx_broadcastframe_g += readl(ioaddr + MMC_RX_BROADCASTFRAME_G); 199 mmc->mmc_rx_broadcastframe_g += readl(mmcaddr +
198 mmc->mmc_rx_multicastframe_g += readl(ioaddr + MMC_RX_MULTICASTFRAME_G); 200 MMC_RX_BROADCASTFRAME_G);
199 mmc->mmc_rx_crc_error += readl(ioaddr + MMC_RX_CRC_ERROR); 201 mmc->mmc_rx_multicastframe_g += readl(mmcaddr +
200 mmc->mmc_rx_align_error += readl(ioaddr + MMC_RX_ALIGN_ERROR); 202 MMC_RX_MULTICASTFRAME_G);
201 mmc->mmc_rx_run_error += readl(ioaddr + MMC_RX_RUN_ERROR); 203 mmc->mmc_rx_crc_error += readl(mmcaddr + MMC_RX_CRC_ERROR);
202 mmc->mmc_rx_jabber_error += readl(ioaddr + MMC_RX_JABBER_ERROR); 204 mmc->mmc_rx_align_error += readl(mmcaddr + MMC_RX_ALIGN_ERROR);
203 mmc->mmc_rx_undersize_g += readl(ioaddr + MMC_RX_UNDERSIZE_G); 205 mmc->mmc_rx_run_error += readl(mmcaddr + MMC_RX_RUN_ERROR);
204 mmc->mmc_rx_oversize_g += readl(ioaddr + MMC_RX_OVERSIZE_G); 206 mmc->mmc_rx_jabber_error += readl(mmcaddr + MMC_RX_JABBER_ERROR);
205 mmc->mmc_rx_64_octets_gb += readl(ioaddr + MMC_RX_64_OCTETS_GB); 207 mmc->mmc_rx_undersize_g += readl(mmcaddr + MMC_RX_UNDERSIZE_G);
208 mmc->mmc_rx_oversize_g += readl(mmcaddr + MMC_RX_OVERSIZE_G);
209 mmc->mmc_rx_64_octets_gb += readl(mmcaddr + MMC_RX_64_OCTETS_GB);
206 mmc->mmc_rx_65_to_127_octets_gb += 210 mmc->mmc_rx_65_to_127_octets_gb +=
207 readl(ioaddr + MMC_RX_65_TO_127_OCTETS_GB); 211 readl(mmcaddr + MMC_RX_65_TO_127_OCTETS_GB);
208 mmc->mmc_rx_128_to_255_octets_gb += 212 mmc->mmc_rx_128_to_255_octets_gb +=
209 readl(ioaddr + MMC_RX_128_TO_255_OCTETS_GB); 213 readl(mmcaddr + MMC_RX_128_TO_255_OCTETS_GB);
210 mmc->mmc_rx_256_to_511_octets_gb += 214 mmc->mmc_rx_256_to_511_octets_gb +=
211 readl(ioaddr + MMC_RX_256_TO_511_OCTETS_GB); 215 readl(mmcaddr + MMC_RX_256_TO_511_OCTETS_GB);
212 mmc->mmc_rx_512_to_1023_octets_gb += 216 mmc->mmc_rx_512_to_1023_octets_gb +=
213 readl(ioaddr + MMC_RX_512_TO_1023_OCTETS_GB); 217 readl(mmcaddr + MMC_RX_512_TO_1023_OCTETS_GB);
214 mmc->mmc_rx_1024_to_max_octets_gb += 218 mmc->mmc_rx_1024_to_max_octets_gb +=
215 readl(ioaddr + MMC_RX_1024_TO_MAX_OCTETS_GB); 219 readl(mmcaddr + MMC_RX_1024_TO_MAX_OCTETS_GB);
216 mmc->mmc_rx_unicast_g += readl(ioaddr + MMC_RX_UNICAST_G); 220 mmc->mmc_rx_unicast_g += readl(mmcaddr + MMC_RX_UNICAST_G);
217 mmc->mmc_rx_length_error += readl(ioaddr + MMC_RX_LENGTH_ERROR); 221 mmc->mmc_rx_length_error += readl(mmcaddr + MMC_RX_LENGTH_ERROR);
218 mmc->mmc_rx_autofrangetype += readl(ioaddr + MMC_RX_AUTOFRANGETYPE); 222 mmc->mmc_rx_autofrangetype += readl(mmcaddr + MMC_RX_AUTOFRANGETYPE);
219 mmc->mmc_rx_pause_frames += readl(ioaddr + MMC_RX_PAUSE_FRAMES); 223 mmc->mmc_rx_pause_frames += readl(mmcaddr + MMC_RX_PAUSE_FRAMES);
220 mmc->mmc_rx_fifo_overflow += readl(ioaddr + MMC_RX_FIFO_OVERFLOW); 224 mmc->mmc_rx_fifo_overflow += readl(mmcaddr + MMC_RX_FIFO_OVERFLOW);
221 mmc->mmc_rx_vlan_frames_gb += readl(ioaddr + MMC_RX_VLAN_FRAMES_GB); 225 mmc->mmc_rx_vlan_frames_gb += readl(mmcaddr + MMC_RX_VLAN_FRAMES_GB);
222 mmc->mmc_rx_watchdog_error += readl(ioaddr + MMC_RX_WATCHDOG_ERROR); 226 mmc->mmc_rx_watchdog_error += readl(mmcaddr + MMC_RX_WATCHDOG_ERROR);
223 /* IPC */ 227 /* IPC */
224 mmc->mmc_rx_ipc_intr_mask += readl(ioaddr + MMC_RX_IPC_INTR_MASK); 228 mmc->mmc_rx_ipc_intr_mask += readl(mmcaddr + MMC_RX_IPC_INTR_MASK);
225 mmc->mmc_rx_ipc_intr += readl(ioaddr + MMC_RX_IPC_INTR); 229 mmc->mmc_rx_ipc_intr += readl(mmcaddr + MMC_RX_IPC_INTR);
226 /* IPv4 */ 230 /* IPv4 */
227 mmc->mmc_rx_ipv4_gd += readl(ioaddr + MMC_RX_IPV4_GD); 231 mmc->mmc_rx_ipv4_gd += readl(mmcaddr + MMC_RX_IPV4_GD);
228 mmc->mmc_rx_ipv4_hderr += readl(ioaddr + MMC_RX_IPV4_HDERR); 232 mmc->mmc_rx_ipv4_hderr += readl(mmcaddr + MMC_RX_IPV4_HDERR);
229 mmc->mmc_rx_ipv4_nopay += readl(ioaddr + MMC_RX_IPV4_NOPAY); 233 mmc->mmc_rx_ipv4_nopay += readl(mmcaddr + MMC_RX_IPV4_NOPAY);
230 mmc->mmc_rx_ipv4_frag += readl(ioaddr + MMC_RX_IPV4_FRAG); 234 mmc->mmc_rx_ipv4_frag += readl(mmcaddr + MMC_RX_IPV4_FRAG);
231 mmc->mmc_rx_ipv4_udsbl += readl(ioaddr + MMC_RX_IPV4_UDSBL); 235 mmc->mmc_rx_ipv4_udsbl += readl(mmcaddr + MMC_RX_IPV4_UDSBL);
232 236
233 mmc->mmc_rx_ipv4_gd_octets += readl(ioaddr + MMC_RX_IPV4_GD_OCTETS); 237 mmc->mmc_rx_ipv4_gd_octets += readl(mmcaddr + MMC_RX_IPV4_GD_OCTETS);
234 mmc->mmc_rx_ipv4_hderr_octets += 238 mmc->mmc_rx_ipv4_hderr_octets +=
235 readl(ioaddr + MMC_RX_IPV4_HDERR_OCTETS); 239 readl(mmcaddr + MMC_RX_IPV4_HDERR_OCTETS);
236 mmc->mmc_rx_ipv4_nopay_octets += 240 mmc->mmc_rx_ipv4_nopay_octets +=
237 readl(ioaddr + MMC_RX_IPV4_NOPAY_OCTETS); 241 readl(mmcaddr + MMC_RX_IPV4_NOPAY_OCTETS);
238 mmc->mmc_rx_ipv4_frag_octets += readl(ioaddr + MMC_RX_IPV4_FRAG_OCTETS); 242 mmc->mmc_rx_ipv4_frag_octets += readl(mmcaddr +
243 MMC_RX_IPV4_FRAG_OCTETS);
239 mmc->mmc_rx_ipv4_udsbl_octets += 244 mmc->mmc_rx_ipv4_udsbl_octets +=
240 readl(ioaddr + MMC_RX_IPV4_UDSBL_OCTETS); 245 readl(mmcaddr + MMC_RX_IPV4_UDSBL_OCTETS);
241 246
242 /* IPV6 */ 247 /* IPV6 */
243 mmc->mmc_rx_ipv6_gd_octets += readl(ioaddr + MMC_RX_IPV6_GD_OCTETS); 248 mmc->mmc_rx_ipv6_gd_octets += readl(mmcaddr + MMC_RX_IPV6_GD_OCTETS);
244 mmc->mmc_rx_ipv6_hderr_octets += 249 mmc->mmc_rx_ipv6_hderr_octets +=
245 readl(ioaddr + MMC_RX_IPV6_HDERR_OCTETS); 250 readl(mmcaddr + MMC_RX_IPV6_HDERR_OCTETS);
246 mmc->mmc_rx_ipv6_nopay_octets += 251 mmc->mmc_rx_ipv6_nopay_octets +=
247 readl(ioaddr + MMC_RX_IPV6_NOPAY_OCTETS); 252 readl(mmcaddr + MMC_RX_IPV6_NOPAY_OCTETS);
248 253
249 mmc->mmc_rx_ipv6_gd += readl(ioaddr + MMC_RX_IPV6_GD); 254 mmc->mmc_rx_ipv6_gd += readl(mmcaddr + MMC_RX_IPV6_GD);
250 mmc->mmc_rx_ipv6_hderr += readl(ioaddr + MMC_RX_IPV6_HDERR); 255 mmc->mmc_rx_ipv6_hderr += readl(mmcaddr + MMC_RX_IPV6_HDERR);
251 mmc->mmc_rx_ipv6_nopay += readl(ioaddr + MMC_RX_IPV6_NOPAY); 256 mmc->mmc_rx_ipv6_nopay += readl(mmcaddr + MMC_RX_IPV6_NOPAY);
252 257
253 /* Protocols */ 258 /* Protocols */
254 mmc->mmc_rx_udp_gd += readl(ioaddr + MMC_RX_UDP_GD); 259 mmc->mmc_rx_udp_gd += readl(mmcaddr + MMC_RX_UDP_GD);
255 mmc->mmc_rx_udp_err += readl(ioaddr + MMC_RX_UDP_ERR); 260 mmc->mmc_rx_udp_err += readl(mmcaddr + MMC_RX_UDP_ERR);
256 mmc->mmc_rx_tcp_gd += readl(ioaddr + MMC_RX_TCP_GD); 261 mmc->mmc_rx_tcp_gd += readl(mmcaddr + MMC_RX_TCP_GD);
257 mmc->mmc_rx_tcp_err += readl(ioaddr + MMC_RX_TCP_ERR); 262 mmc->mmc_rx_tcp_err += readl(mmcaddr + MMC_RX_TCP_ERR);
258 mmc->mmc_rx_icmp_gd += readl(ioaddr + MMC_RX_ICMP_GD); 263 mmc->mmc_rx_icmp_gd += readl(mmcaddr + MMC_RX_ICMP_GD);
259 mmc->mmc_rx_icmp_err += readl(ioaddr + MMC_RX_ICMP_ERR); 264 mmc->mmc_rx_icmp_err += readl(mmcaddr + MMC_RX_ICMP_ERR);
260 265
261 mmc->mmc_rx_udp_gd_octets += readl(ioaddr + MMC_RX_UDP_GD_OCTETS); 266 mmc->mmc_rx_udp_gd_octets += readl(mmcaddr + MMC_RX_UDP_GD_OCTETS);
262 mmc->mmc_rx_udp_err_octets += readl(ioaddr + MMC_RX_UDP_ERR_OCTETS); 267 mmc->mmc_rx_udp_err_octets += readl(mmcaddr + MMC_RX_UDP_ERR_OCTETS);
263 mmc->mmc_rx_tcp_gd_octets += readl(ioaddr + MMC_RX_TCP_GD_OCTETS); 268 mmc->mmc_rx_tcp_gd_octets += readl(mmcaddr + MMC_RX_TCP_GD_OCTETS);
264 mmc->mmc_rx_tcp_err_octets += readl(ioaddr + MMC_RX_TCP_ERR_OCTETS); 269 mmc->mmc_rx_tcp_err_octets += readl(mmcaddr + MMC_RX_TCP_ERR_OCTETS);
265 mmc->mmc_rx_icmp_gd_octets += readl(ioaddr + MMC_RX_ICMP_GD_OCTETS); 270 mmc->mmc_rx_icmp_gd_octets += readl(mmcaddr + MMC_RX_ICMP_GD_OCTETS);
266 mmc->mmc_rx_icmp_err_octets += readl(ioaddr + MMC_RX_ICMP_ERR_OCTETS); 271 mmc->mmc_rx_icmp_err_octets += readl(mmcaddr + MMC_RX_ICMP_ERR_OCTETS);
267} 272}
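
This hunk converts every MMC counter register from an absolute offset to an offset relative to a per-variant MMC base (0x100 on GMAC3.x, 0x700 on GMAC4, per the mmc.h hunk above), so the same dwmac_mmc_read() serves both cores once the caller passes the right base. A minimal sketch of that base selection, using the two offsets from the patch; the core-ID threshold value is an assumption for illustration only:

#include <stdio.h>
#include <stdint.h>

#define MMC_GMAC4_OFFSET     0x700
#define MMC_GMAC3_X_OFFSET   0x100
#define MMC_TX_OCTETCOUNT_GB 0x14    /* now relative to the MMC base */

#define DWMAC_CORE_4_00      0x40    /* assumed Synopsys-ID threshold */

/* return the byte offset of a counter from the start of the MAC window */
static uint32_t mmc_counter_offset(uint32_t synopsys_id, uint32_t counter)
{
	uint32_t base = (synopsys_id >= DWMAC_CORE_4_00) ?
			MMC_GMAC4_OFFSET : MMC_GMAC3_X_OFFSET;

	return base + counter;
}

int main(void)
{
	printf("GMAC3.x tx_octetcount_gb at 0x%x\n",
	       (unsigned)mmc_counter_offset(0x37, MMC_TX_OCTETCOUNT_GB));
	printf("GMAC4   tx_octetcount_gb at 0x%x\n",
	       (unsigned)mmc_counter_offset(0x40, MMC_TX_OCTETCOUNT_GB));
	return 0;
}

On the older core this recovers the pre-patch absolute offset (0x114), while on GMAC4 the same counter lands at 0x714.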
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index e13228f115f0..2beacd0d3043 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -199,11 +199,6 @@ static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
199{ 199{
200 unsigned int tdes1 = p->des1; 200 unsigned int tdes1 = p->des1;
201 201
202 if (mode == STMMAC_CHAIN_MODE)
203 norm_set_tx_desc_len_on_chain(p, len);
204 else
205 norm_set_tx_desc_len_on_ring(p, len);
206
207 if (is_fs) 202 if (is_fs)
208 tdes1 |= TDES1_FIRST_SEGMENT; 203 tdes1 |= TDES1_FIRST_SEGMENT;
209 else 204 else
@@ -217,10 +212,15 @@ static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
217 if (ls) 212 if (ls)
218 tdes1 |= TDES1_LAST_SEGMENT; 213 tdes1 |= TDES1_LAST_SEGMENT;
219 214
220 if (tx_own)
221 tdes1 |= TDES0_OWN;
222
223 p->des1 = tdes1; 215 p->des1 = tdes1;
216
217 if (mode == STMMAC_CHAIN_MODE)
218 norm_set_tx_desc_len_on_chain(p, len);
219 else
220 norm_set_tx_desc_len_on_ring(p, len);
221
222 if (tx_own)
223 p->des0 |= TDES0_OWN;
224} 224}
225 225
226static void ndesc_set_tx_ic(struct dma_desc *p) 226static void ndesc_set_tx_ic(struct dma_desc *p)
@@ -279,6 +279,26 @@ static int ndesc_get_rx_timestamp_status(void *desc, u32 ats)
279 return 1; 279 return 1;
280} 280}
281 281
282static void ndesc_display_ring(void *head, unsigned int size, bool rx)
283{
284 struct dma_desc *p = (struct dma_desc *)head;
285 int i;
286
287 pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");
288
289 for (i = 0; i < size; i++) {
290 u64 x;
291
292 x = *(u64 *)p;
293 pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
294 i, (unsigned int)virt_to_phys(p),
295 (unsigned int)x, (unsigned int)(x >> 32),
296 p->des2, p->des3);
297 p++;
298 }
299 pr_info("\n");
300}
301
282const struct stmmac_desc_ops ndesc_ops = { 302const struct stmmac_desc_ops ndesc_ops = {
283 .tx_status = ndesc_get_tx_status, 303 .tx_status = ndesc_get_tx_status,
284 .rx_status = ndesc_get_rx_status, 304 .rx_status = ndesc_get_rx_status,
@@ -297,4 +317,5 @@ const struct stmmac_desc_ops ndesc_ops = {
297 .get_tx_timestamp_status = ndesc_get_tx_timestamp_status, 317 .get_tx_timestamp_status = ndesc_get_tx_timestamp_status,
298 .get_timestamp = ndesc_get_timestamp, 318 .get_timestamp = ndesc_get_timestamp,
299 .get_rx_timestamp_status = ndesc_get_rx_timestamp_status, 319 .get_rx_timestamp_status = ndesc_get_rx_timestamp_status,
320 .display_ring = ndesc_display_ring,
300}; 321};
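
The norm_desc.c hunk reorders ndesc_prepare_tx_desc() so that des1 flags and the length fields are written first and TDES0_OWN is set in des0 only at the end, i.e. ownership is handed to the DMA engine as the final step (previously the OWN flag was folded into the des1 staging value before the length was programmed). A small sketch of that ordering pattern, with an invented descriptor layout and a plain compiler barrier standing in for the driver's real memory barrier:

#include <stdio.h>
#include <stdint.h>

#define TDES0_OWN (1u << 31)

struct fake_desc {
	uint32_t des0, des1, des2, des3;
};

static void prepare_tx(struct fake_desc *p, uint32_t len, uint32_t flags,
		       int tx_own)
{
	/* 1) fill everything the DMA engine will look at ... */
	p->des1 = flags;
	p->des2 = len;

	/* 2) ... then publish the descriptor by flipping OWN last */
	if (tx_own) {
		__asm__ __volatile__("" ::: "memory"); /* stand-in barrier */
		p->des0 |= TDES0_OWN;
	}
}

int main(void)
{
	struct fake_desc d = { 0 };

	prepare_tx(&d, 64, 0x3, 1);
	printf("des0=0x%08x des1=0x%08x des2=0x%08x\n",
	       (unsigned)d.des0, (unsigned)d.des1, (unsigned)d.des2);
	return 0;
}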
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 8bbab97895fe..ff6750621ff7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -24,7 +24,7 @@
24#define __STMMAC_H__ 24#define __STMMAC_H__
25 25
26#define STMMAC_RESOURCE_NAME "stmmaceth" 26#define STMMAC_RESOURCE_NAME "stmmaceth"
27#define DRV_MODULE_VERSION "Oct_2015" 27#define DRV_MODULE_VERSION "Jan_2016"
28 28
29#include <linux/clk.h> 29#include <linux/clk.h>
30#include <linux/stmmac.h> 30#include <linux/stmmac.h>
@@ -67,6 +67,7 @@ struct stmmac_priv {
67 spinlock_t tx_lock; 67 spinlock_t tx_lock;
68 bool tx_path_in_lpi_mode; 68 bool tx_path_in_lpi_mode;
69 struct timer_list txtimer; 69 struct timer_list txtimer;
70 bool tso;
70 71
71 struct dma_desc *dma_rx ____cacheline_aligned_in_smp; 72 struct dma_desc *dma_rx ____cacheline_aligned_in_smp;
72 struct dma_extended_desc *dma_erx; 73 struct dma_extended_desc *dma_erx;
@@ -128,6 +129,10 @@ struct stmmac_priv {
128 int use_riwt; 129 int use_riwt;
129 int irq_wake; 130 int irq_wake;
130 spinlock_t ptp_lock; 131 spinlock_t ptp_lock;
132 void __iomem *mmcaddr;
133 u32 rx_tail_addr;
134 u32 tx_tail_addr;
135 u32 mss;
131 136
132#ifdef CONFIG_DEBUG_FS 137#ifdef CONFIG_DEBUG_FS
133 struct dentry *dbgfs_dir; 138 struct dentry *dbgfs_dir;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 3c7928edfebb..e2b98b01647e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -161,6 +161,9 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
161 STMMAC_STAT(mtl_rx_fifo_ctrl_active), 161 STMMAC_STAT(mtl_rx_fifo_ctrl_active),
162 STMMAC_STAT(mac_rx_frame_ctrl_fifo), 162 STMMAC_STAT(mac_rx_frame_ctrl_fifo),
163 STMMAC_STAT(mac_gmii_rx_proto_engine), 163 STMMAC_STAT(mac_gmii_rx_proto_engine),
164 /* TSO */
165 STMMAC_STAT(tx_tso_frames),
166 STMMAC_STAT(tx_tso_nfrags),
164}; 167};
165#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats) 168#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)
166 169
@@ -499,14 +502,14 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
499 int i, j = 0; 502 int i, j = 0;
500 503
501 /* Update the DMA HW counters for dwmac10/100 */ 504 /* Update the DMA HW counters for dwmac10/100 */
502 if (!priv->plat->has_gmac) 505 if (priv->hw->dma->dma_diagnostic_fr)
503 priv->hw->dma->dma_diagnostic_fr(&dev->stats, 506 priv->hw->dma->dma_diagnostic_fr(&dev->stats,
504 (void *) &priv->xstats, 507 (void *) &priv->xstats,
505 priv->ioaddr); 508 priv->ioaddr);
506 else { 509 else {
507 /* If supported, for new GMAC chips expose the MMC counters */ 510 /* If supported, for new GMAC chips expose the MMC counters */
508 if (priv->dma_cap.rmon) { 511 if (priv->dma_cap.rmon) {
509 dwmac_mmc_read(priv->ioaddr, &priv->mmc); 512 dwmac_mmc_read(priv->mmcaddr, &priv->mmc);
510 513
511 for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) { 514 for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) {
512 char *p; 515 char *p;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 4c5ce9848ca9..b87edb72e80a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -56,6 +56,7 @@
56#include "dwmac1000.h" 56#include "dwmac1000.h"
57 57
58#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x) 58#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
59#define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
59 60
60/* Module parameters */ 61/* Module parameters */
61#define TX_TIMEO 5000 62#define TX_TIMEO 5000
@@ -278,7 +279,6 @@ static void stmmac_eee_ctrl_timer(unsigned long arg)
278 */ 279 */
279bool stmmac_eee_init(struct stmmac_priv *priv) 280bool stmmac_eee_init(struct stmmac_priv *priv)
280{ 281{
281 char *phy_bus_name = priv->plat->phy_bus_name;
282 unsigned long flags; 282 unsigned long flags;
283 bool ret = false; 283 bool ret = false;
284 284
@@ -289,10 +289,6 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
289 (priv->pcs == STMMAC_PCS_RTBI)) 289 (priv->pcs == STMMAC_PCS_RTBI))
290 goto out; 290 goto out;
291 291
292 /* Never init EEE in case of a switch is attached */
293 if (phy_bus_name && (!strcmp(phy_bus_name, "fixed")))
294 goto out;
295
296 /* MAC core supports the EEE feature. */ 292 /* MAC core supports the EEE feature. */
297 if (priv->dma_cap.eee) { 293 if (priv->dma_cap.eee) {
298 int tx_lpi_timer = priv->tx_lpi_timer; 294 int tx_lpi_timer = priv->tx_lpi_timer;
@@ -726,13 +722,15 @@ static void stmmac_adjust_link(struct net_device *dev)
726 new_state = 1; 722 new_state = 1;
727 switch (phydev->speed) { 723 switch (phydev->speed) {
728 case 1000: 724 case 1000:
729 if (likely(priv->plat->has_gmac)) 725 if (likely((priv->plat->has_gmac) ||
726 (priv->plat->has_gmac4)))
730 ctrl &= ~priv->hw->link.port; 727 ctrl &= ~priv->hw->link.port;
731 stmmac_hw_fix_mac_speed(priv); 728 stmmac_hw_fix_mac_speed(priv);
732 break; 729 break;
733 case 100: 730 case 100:
734 case 10: 731 case 10:
735 if (priv->plat->has_gmac) { 732 if (likely((priv->plat->has_gmac) ||
733 (priv->plat->has_gmac4))) {
736 ctrl |= priv->hw->link.port; 734 ctrl |= priv->hw->link.port;
737 if (phydev->speed == SPEED_100) { 735 if (phydev->speed == SPEED_100) {
738 ctrl |= priv->hw->link.speed; 736 ctrl |= priv->hw->link.speed;
@@ -772,10 +770,16 @@ static void stmmac_adjust_link(struct net_device *dev)
772 770
773 spin_unlock_irqrestore(&priv->lock, flags); 771 spin_unlock_irqrestore(&priv->lock, flags);
774 772
775 /* At this stage, it could be needed to setup the EEE or adjust some 773 if (phydev->is_pseudo_fixed_link)
776	 * MAC related HW registers. 774		/* Stop the PHY layer from calling the link adjustment hook
777	 */ 775		 * when a switch is attached to the stmmac driver.
778 priv->eee_enabled = stmmac_eee_init(priv); 776 */
777 phydev->irq = PHY_IGNORE_INTERRUPT;
778 else
779 /* At this stage, init the EEE if supported.
780 * Never called in case of fixed_link.
781 */
782 priv->eee_enabled = stmmac_eee_init(priv);
779} 783}
780 784
781/** 785/**
@@ -827,12 +831,8 @@ static int stmmac_init_phy(struct net_device *dev)
827 phydev = of_phy_connect(dev, priv->plat->phy_node, 831 phydev = of_phy_connect(dev, priv->plat->phy_node,
828 &stmmac_adjust_link, 0, interface); 832 &stmmac_adjust_link, 0, interface);
829 } else { 833 } else {
830 if (priv->plat->phy_bus_name) 834 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
831 snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x", 835 priv->plat->bus_id);
832 priv->plat->phy_bus_name, priv->plat->bus_id);
833 else
834 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
835 priv->plat->bus_id);
836 836
837 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id, 837 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
838 priv->plat->phy_addr); 838 priv->plat->phy_addr);
@@ -870,11 +870,6 @@ static int stmmac_init_phy(struct net_device *dev)
870 return -ENODEV; 870 return -ENODEV;
871 } 871 }
872 872
873 /* If attached to a switch, there is no reason to poll phy handler */
874 if (priv->plat->phy_bus_name)
875 if (!strcmp(priv->plat->phy_bus_name, "fixed"))
876 phydev->irq = PHY_IGNORE_INTERRUPT;
877
878 pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)" 873 pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)"
879 " Link = %d\n", dev->name, phydev->phy_id, phydev->link); 874 " Link = %d\n", dev->name, phydev->phy_id, phydev->link);
880 875
@@ -883,53 +878,22 @@ static int stmmac_init_phy(struct net_device *dev)
883 return 0; 878 return 0;
884} 879}
885 880
886/**
887 * stmmac_display_ring - display ring
888 * @head: pointer to the head of the ring passed.
889 * @size: size of the ring.
890 * @extend_desc: to verify if extended descriptors are used.
891 * Description: display the control/status and buffer descriptors.
892 */
893static void stmmac_display_ring(void *head, int size, int extend_desc)
894{
895 int i;
896 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
897 struct dma_desc *p = (struct dma_desc *)head;
898
899 for (i = 0; i < size; i++) {
900 u64 x;
901 if (extend_desc) {
902 x = *(u64 *) ep;
903 pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
904 i, (unsigned int)virt_to_phys(ep),
905 (unsigned int)x, (unsigned int)(x >> 32),
906 ep->basic.des2, ep->basic.des3);
907 ep++;
908 } else {
909 x = *(u64 *) p;
910 pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
911 i, (unsigned int)virt_to_phys(p),
912 (unsigned int)x, (unsigned int)(x >> 32),
913 p->des2, p->des3);
914 p++;
915 }
916 pr_info("\n");
917 }
918}
919
920static void stmmac_display_rings(struct stmmac_priv *priv) 881static void stmmac_display_rings(struct stmmac_priv *priv)
921{ 882{
883 void *head_rx, *head_tx;
884
922 if (priv->extend_desc) { 885 if (priv->extend_desc) {
923 pr_info("Extended RX descriptor ring:\n"); 886 head_rx = (void *)priv->dma_erx;
924 stmmac_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1); 887 head_tx = (void *)priv->dma_etx;
925 pr_info("Extended TX descriptor ring:\n");
926 stmmac_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1);
927 } else { 888 } else {
928 pr_info("RX descriptor ring:\n"); 889 head_rx = (void *)priv->dma_rx;
929 stmmac_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0); 890 head_tx = (void *)priv->dma_tx;
930 pr_info("TX descriptor ring:\n");
931 stmmac_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0);
932 } 891 }
892
893 /* Display Rx ring */
894 priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
895 /* Display Tx ring */
896 priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
933} 897}
934 898
935static int stmmac_set_bfsize(int mtu, int bufsize) 899static int stmmac_set_bfsize(int mtu, int bufsize)
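
stmmac_display_rings() now only picks the right head pointer (extended vs. basic descriptors) and delegates the dump to the per-descriptor-type display_ring operation added earlier in this series. A tiny sketch of that dispatch shape, with a hypothetical op table standing in for struct stmmac_desc_ops:

#include <stdio.h>
#include <stdbool.h>

/* hypothetical op table modelled on the driver's descriptor ops */
struct desc_ops {
	void (*display_ring)(void *head, unsigned int size, bool rx);
};

static void basic_display(void *head, unsigned int size, bool rx)
{
	printf("%s ring at %p, %u entries\n", rx ? "RX" : "TX", head, size);
}

static void ext_display(void *head, unsigned int size, bool rx)
{
	printf("Extended %s ring at %p, %u entries\n",
	       rx ? "RX" : "TX", head, size);
}

int main(void)
{
	int dummy_rx, dummy_tx;
	bool extend_desc = true;
	const struct desc_ops ops = {
		.display_ring = extend_desc ? ext_display : basic_display,
	};

	/* choose the head once, then let the op do the walking */
	ops.display_ring(&dummy_rx, 256, true);
	ops.display_ring(&dummy_tx, 256, false);
	return 0;
}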
@@ -1008,7 +972,10 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1008 return -EINVAL; 972 return -EINVAL;
1009 } 973 }
1010 974
1011 p->des2 = priv->rx_skbuff_dma[i]; 975 if (priv->synopsys_id >= DWMAC_CORE_4_00)
976 p->des0 = priv->rx_skbuff_dma[i];
977 else
978 p->des2 = priv->rx_skbuff_dma[i];
1012 979
1013 if ((priv->hw->mode->init_desc3) && 980 if ((priv->hw->mode->init_desc3) &&
1014 (priv->dma_buf_sz == BUF_SIZE_16KiB)) 981 (priv->dma_buf_sz == BUF_SIZE_16KiB))
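
From GMAC4 onward the RX buffer address moves from des2 to des0, so the init and refill paths now pick the target word based on the Synopsys core ID. A compact sketch of that selection against a stand-in descriptor; the version threshold constant is an assumption for illustration:

#include <stdio.h>
#include <stdint.h>

#define DWMAC_CORE_4_00 0x40   /* assumed Synopsys-ID threshold */

struct fake_desc {
	uint32_t des0, des1, des2, des3;
};

static void set_rx_buffer_addr(struct fake_desc *p, uint32_t synopsys_id,
			       uint32_t dma_addr)
{
	if (synopsys_id >= DWMAC_CORE_4_00) {
		p->des0 = dma_addr;   /* GMAC4: buffer pointer lives in des0 */
		p->des1 = 0;
	} else {
		p->des2 = dma_addr;   /* older cores: buffer pointer in des2 */
	}
}

int main(void)
{
	struct fake_desc gmac3 = { 0 }, gmac4 = { 0 };

	set_rx_buffer_addr(&gmac3, 0x37, 0x1000);
	set_rx_buffer_addr(&gmac4, 0x41, 0x2000);
	printf("gmac3: des2=0x%x  gmac4: des0=0x%x\n",
	       (unsigned)gmac3.des2, (unsigned)gmac4.des0);
	return 0;
}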
@@ -1099,7 +1066,16 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1099 p = &((priv->dma_etx + i)->basic); 1066 p = &((priv->dma_etx + i)->basic);
1100 else 1067 else
1101 p = priv->dma_tx + i; 1068 p = priv->dma_tx + i;
1102 p->des2 = 0; 1069
1070 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1071 p->des0 = 0;
1072 p->des1 = 0;
1073 p->des2 = 0;
1074 p->des3 = 0;
1075 } else {
1076 p->des2 = 0;
1077 }
1078
1103 priv->tx_skbuff_dma[i].buf = 0; 1079 priv->tx_skbuff_dma[i].buf = 0;
1104 priv->tx_skbuff_dma[i].map_as_page = false; 1080 priv->tx_skbuff_dma[i].map_as_page = false;
1105 priv->tx_skbuff_dma[i].len = 0; 1081 priv->tx_skbuff_dma[i].len = 0;
@@ -1362,9 +1338,13 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
1362 priv->tx_skbuff_dma[entry].len, 1338 priv->tx_skbuff_dma[entry].len,
1363 DMA_TO_DEVICE); 1339 DMA_TO_DEVICE);
1364 priv->tx_skbuff_dma[entry].buf = 0; 1340 priv->tx_skbuff_dma[entry].buf = 0;
1341 priv->tx_skbuff_dma[entry].len = 0;
1365 priv->tx_skbuff_dma[entry].map_as_page = false; 1342 priv->tx_skbuff_dma[entry].map_as_page = false;
1366 } 1343 }
1367 priv->hw->mode->clean_desc3(priv, p); 1344
1345 if (priv->hw->mode->clean_desc3)
1346 priv->hw->mode->clean_desc3(priv, p);
1347
1368 priv->tx_skbuff_dma[entry].last_segment = false; 1348 priv->tx_skbuff_dma[entry].last_segment = false;
1369 priv->tx_skbuff_dma[entry].is_jumbo = false; 1349 priv->tx_skbuff_dma[entry].is_jumbo = false;
1370 1350
@@ -1487,41 +1467,23 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
1487static void stmmac_mmc_setup(struct stmmac_priv *priv) 1467static void stmmac_mmc_setup(struct stmmac_priv *priv)
1488{ 1468{
1489 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | 1469 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
1490 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; 1470 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
1491 1471
1492 dwmac_mmc_intr_all_mask(priv->ioaddr); 1472 if (priv->synopsys_id >= DWMAC_CORE_4_00)
1473 priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
1474 else
1475 priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
1476
1477 dwmac_mmc_intr_all_mask(priv->mmcaddr);
1493 1478
1494 if (priv->dma_cap.rmon) { 1479 if (priv->dma_cap.rmon) {
1495 dwmac_mmc_ctrl(priv->ioaddr, mode); 1480 dwmac_mmc_ctrl(priv->mmcaddr, mode);
1496 memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); 1481 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
1497 } else 1482 } else
1498 pr_info(" No MAC Management Counters available\n"); 1483 pr_info(" No MAC Management Counters available\n");
1499} 1484}
1500 1485
1501/** 1486/**
1502 * stmmac_get_synopsys_id - return the SYINID.
1503 * @priv: driver private structure
1504 * Description: this simple function is to decode and return the SYINID
1505 * starting from the HW core register.
1506 */
1507static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
1508{
1509 u32 hwid = priv->hw->synopsys_uid;
1510
1511 /* Check Synopsys Id (not available on old chips) */
1512 if (likely(hwid)) {
1513 u32 uid = ((hwid & 0x0000ff00) >> 8);
1514 u32 synid = (hwid & 0x000000ff);
1515
1516 pr_info("stmmac - user ID: 0x%x, Synopsys ID: 0x%x\n",
1517 uid, synid);
1518
1519 return synid;
1520 }
1521 return 0;
1522}
1523
1524/**
1525 * stmmac_selec_desc_mode - to select among: normal/alternate/extend descriptors 1487 * stmmac_selec_desc_mode - to select among: normal/alternate/extend descriptors
1526 * @priv: driver private structure 1488 * @priv: driver private structure
1527 * Description: select the Enhanced/Alternate or Normal descriptors. 1489 * Description: select the Enhanced/Alternate or Normal descriptors.
@@ -1558,51 +1520,15 @@ static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
1558 */ 1520 */
1559static int stmmac_get_hw_features(struct stmmac_priv *priv) 1521static int stmmac_get_hw_features(struct stmmac_priv *priv)
1560{ 1522{
1561 u32 hw_cap = 0; 1523 u32 ret = 0;
1562 1524
1563 if (priv->hw->dma->get_hw_feature) { 1525 if (priv->hw->dma->get_hw_feature) {
1564 hw_cap = priv->hw->dma->get_hw_feature(priv->ioaddr); 1526 priv->hw->dma->get_hw_feature(priv->ioaddr,
1565 1527 &priv->dma_cap);
1566 priv->dma_cap.mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL); 1528 ret = 1;
1567 priv->dma_cap.mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1; 1529 }
1568 priv->dma_cap.half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2; 1530
1569 priv->dma_cap.hash_filter = (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4; 1531 return ret;
1570 priv->dma_cap.multi_addr = (hw_cap & DMA_HW_FEAT_ADDMAC) >> 5;
1571 priv->dma_cap.pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6;
1572 priv->dma_cap.sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8;
1573 priv->dma_cap.pmt_remote_wake_up =
1574 (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9;
1575 priv->dma_cap.pmt_magic_frame =
1576 (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10;
1577 /* MMC */
1578 priv->dma_cap.rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11;
1579 /* IEEE 1588-2002 */
1580 priv->dma_cap.time_stamp =
1581 (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12;
1582 /* IEEE 1588-2008 */
1583 priv->dma_cap.atime_stamp =
1584 (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13;
1585 /* 802.3az - Energy-Efficient Ethernet (EEE) */
1586 priv->dma_cap.eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14;
1587 priv->dma_cap.av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15;
1588 /* TX and RX csum */
1589 priv->dma_cap.tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16;
1590 priv->dma_cap.rx_coe_type1 =
1591 (hw_cap & DMA_HW_FEAT_RXTYP1COE) >> 17;
1592 priv->dma_cap.rx_coe_type2 =
1593 (hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18;
1594 priv->dma_cap.rxfifo_over_2048 =
1595 (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19;
1596 /* TX and RX number of channels */
1597 priv->dma_cap.number_rx_channel =
1598 (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20;
1599 priv->dma_cap.number_tx_channel =
1600 (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22;
1601 /* Alternate (enhanced) DESC mode */
1602 priv->dma_cap.enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
1603 }
1604
1605 return hw_cap;
1606} 1532}
1607 1533
1608/** 1534/**
@@ -1658,8 +1584,19 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
1658 priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst, 1584 priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst,
1659 aal, priv->dma_tx_phy, priv->dma_rx_phy, atds); 1585 aal, priv->dma_tx_phy, priv->dma_rx_phy, atds);
1660 1586
1661 if ((priv->synopsys_id >= DWMAC_CORE_3_50) && 1587 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1662 (priv->plat->axi && priv->hw->dma->axi)) 1588 priv->rx_tail_addr = priv->dma_rx_phy +
1589 (DMA_RX_SIZE * sizeof(struct dma_desc));
1590 priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, priv->rx_tail_addr,
1591 STMMAC_CHAN0);
1592
1593 priv->tx_tail_addr = priv->dma_tx_phy +
1594 (DMA_TX_SIZE * sizeof(struct dma_desc));
1595 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
1596 STMMAC_CHAN0);
1597 }
1598
1599 if (priv->plat->axi && priv->hw->dma->axi)
1663 priv->hw->dma->axi(priv->ioaddr, priv->plat->axi); 1600 priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
1664 1601
1665 return ret; 1602 return ret;
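
For GMAC4 the DMA engine is kicked through per-channel tail pointers instead of the old transmit-poll register; the tail address programmed here is simply the ring's physical base plus the size of the whole descriptor array. A short sketch of that arithmetic, with illustrative ring lengths, base addresses and a 16-byte basic descriptor assumed:

#include <stdio.h>
#include <stdint.h>

#define DMA_TX_SIZE 512          /* illustrative ring length */
#define DMA_RX_SIZE 512

struct dma_desc_sz { uint32_t w[4]; };   /* 16-byte basic descriptor */

int main(void)
{
	uint32_t dma_tx_phy = 0x40000000;    /* pretend DMA base addresses */
	uint32_t dma_rx_phy = 0x40100000;

	/* tail = base + one full ring worth of descriptors */
	uint32_t tx_tail = dma_tx_phy + DMA_TX_SIZE * sizeof(struct dma_desc_sz);
	uint32_t rx_tail = dma_rx_phy + DMA_RX_SIZE * sizeof(struct dma_desc_sz);

	printf("tx tail 0x%x, rx tail 0x%x\n",
	       (unsigned)tx_tail, (unsigned)rx_tail);
	return 0;
}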
@@ -1739,7 +1676,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
1739 } 1676 }
1740 1677
1741 /* Enable the MAC Rx/Tx */ 1678 /* Enable the MAC Rx/Tx */
1742 stmmac_set_mac(priv->ioaddr, true); 1679 if (priv->synopsys_id >= DWMAC_CORE_4_00)
1680 stmmac_dwmac4_set_mac(priv->ioaddr, true);
1681 else
1682 stmmac_set_mac(priv->ioaddr, true);
1743 1683
1744 /* Set the HW DMA mode and the COE */ 1684 /* Set the HW DMA mode and the COE */
1745 stmmac_dma_operation_mode(priv); 1685 stmmac_dma_operation_mode(priv);
@@ -1777,6 +1717,18 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
1777 if (priv->pcs && priv->hw->mac->ctrl_ane) 1717 if (priv->pcs && priv->hw->mac->ctrl_ane)
1778 priv->hw->mac->ctrl_ane(priv->hw, 0); 1718 priv->hw->mac->ctrl_ane(priv->hw, 0);
1779 1719
1720 /* set TX ring length */
1721 if (priv->hw->dma->set_tx_ring_len)
1722 priv->hw->dma->set_tx_ring_len(priv->ioaddr,
1723 (DMA_TX_SIZE - 1));
1724 /* set RX ring length */
1725 if (priv->hw->dma->set_rx_ring_len)
1726 priv->hw->dma->set_rx_ring_len(priv->ioaddr,
1727 (DMA_RX_SIZE - 1));
1728 /* Enable TSO */
1729 if (priv->tso)
1730 priv->hw->dma->enable_tso(priv->ioaddr, 1, STMMAC_CHAN0);
1731
1780 return 0; 1732 return 0;
1781} 1733}
1782 1734
@@ -1942,6 +1894,239 @@ static int stmmac_release(struct net_device *dev)
1942} 1894}
1943 1895
1944/** 1896/**
1897 * stmmac_tso_allocator - allocate TSO descriptors for a payload
1898 * @priv: driver private structure
1899 * @des: buffer start address
1900 * @total_len: total length to fill in descriptors
1901 * @last_segment: condition for the last descriptor
1902 * Description:
1903 * This function fills in descriptors, requesting new ones as the
1904 * remaining buffer length demands.
1905 */
1906static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
1907 int total_len, bool last_segment)
1908{
1909 struct dma_desc *desc;
1910 int tmp_len;
1911 u32 buff_size;
1912
1913 tmp_len = total_len;
1914
1915 while (tmp_len > 0) {
1916 priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
1917 desc = priv->dma_tx + priv->cur_tx;
1918
1919 desc->des0 = des + (total_len - tmp_len);
1920 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
1921 TSO_MAX_BUFF_SIZE : tmp_len;
1922
1923 priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
1924 0, 1,
1925 (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
1926 0, 0);
1927
1928 tmp_len -= TSO_MAX_BUFF_SIZE;
1929 }
1930}
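
stmmac_tso_allocator() spreads a payload across as many descriptors as needed, each carrying at most TSO_MAX_BUFF_SIZE (16 KiB - 1) bytes, advancing the buffer address by the amount already consumed. A user-space sketch of the same chunking loop, printing the per-descriptor offset/length pairs it would program:

#include <stdio.h>

#define TSO_MAX_BUFF_SIZE ((16 * 1024) - 1)

/* split total_len into TSO_MAX_BUFF_SIZE sized buffers starting at 'des' */
static void tso_chunks(unsigned int des, int total_len)
{
	int tmp_len = total_len;

	while (tmp_len > 0) {
		unsigned int buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
					 TSO_MAX_BUFF_SIZE : (unsigned int)tmp_len;

		printf("desc: buf=0x%x len=%u\n",
		       des + (unsigned int)(total_len - tmp_len), buff_size);
		tmp_len -= TSO_MAX_BUFF_SIZE;
	}
}

int main(void)
{
	tso_chunks(0x1000, 40000);   /* ~40 KB payload -> three descriptors */
	return 0;
}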
1931
1932/**
1933 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
1934 * @skb : the socket buffer
1935 * @dev : device pointer
1936 * Description: this is the transmit function that is called on TSO frames
1937 * (support available on GMAC4 and newer chips).
1938 * Diagram below show the ring programming in case of TSO frames:
1939 *
1940 * First Descriptor
1941 * --------
1942 * | DES0 |---> buffer1 = L2/L3/L4 header
1943 * | DES1 |---> TCP Payload (can continue on next descr...)
1944 * | DES2 |---> buffer 1 and 2 len
1945 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
1946 * --------
1947 * |
1948 * ...
1949 * |
1950 * --------
1951 * | DES0 | --| Split TCP Payload on Buffers 1 and 2
1952 * | DES1 | --|
1953 * | DES2 | --> buffer 1 and 2 len
1954 * | DES3 |
1955 * --------
1956 *
1957 * The MSS is fixed while TSO is enabled, so the TDES3 ctx field only needs reprogramming when it changes.
1958 */
1959static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
1960{
1961 u32 pay_len, mss;
1962 int tmp_pay_len = 0;
1963 struct stmmac_priv *priv = netdev_priv(dev);
1964 int nfrags = skb_shinfo(skb)->nr_frags;
1965 unsigned int first_entry, des;
1966 struct dma_desc *desc, *first, *mss_desc = NULL;
1967 u8 proto_hdr_len;
1968 int i;
1969
1970 spin_lock(&priv->tx_lock);
1971
1972 /* Compute header lengths */
1973 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1974
1975	/* Desc availability based on threshold should be safe enough */
1976 if (unlikely(stmmac_tx_avail(priv) <
1977 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
1978 if (!netif_queue_stopped(dev)) {
1979 netif_stop_queue(dev);
1980 /* This is a hard error, log it. */
1981 pr_err("%s: Tx Ring full when queue awake\n", __func__);
1982 }
1983 spin_unlock(&priv->tx_lock);
1984 return NETDEV_TX_BUSY;
1985 }
1986
1987 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
1988
1989 mss = skb_shinfo(skb)->gso_size;
1990
1991 /* set new MSS value if needed */
1992 if (mss != priv->mss) {
1993 mss_desc = priv->dma_tx + priv->cur_tx;
1994 priv->hw->desc->set_mss(mss_desc, mss);
1995 priv->mss = mss;
1996 priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
1997 }
1998
1999 if (netif_msg_tx_queued(priv)) {
2000 pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2001 __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2002 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2003 skb->data_len);
2004 }
2005
2006 first_entry = priv->cur_tx;
2007
2008 desc = priv->dma_tx + first_entry;
2009 first = desc;
2010
2011 /* first descriptor: fill Headers on Buf1 */
2012 des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2013 DMA_TO_DEVICE);
2014 if (dma_mapping_error(priv->device, des))
2015 goto dma_map_err;
2016
2017 priv->tx_skbuff_dma[first_entry].buf = des;
2018 priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2019 priv->tx_skbuff[first_entry] = skb;
2020
2021 first->des0 = des;
2022
2023 /* Fill start of payload in buff2 of first descriptor */
2024 if (pay_len)
2025 first->des1 = des + proto_hdr_len;
2026
2027 /* If needed take extra descriptors to fill the remaining payload */
2028 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2029
2030 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0));
2031
2032 /* Prepare fragments */
2033 for (i = 0; i < nfrags; i++) {
2034 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2035
2036 des = skb_frag_dma_map(priv->device, frag, 0,
2037 skb_frag_size(frag),
2038 DMA_TO_DEVICE);
2039
2040 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2041 (i == nfrags - 1));
2042
2043 priv->tx_skbuff_dma[priv->cur_tx].buf = des;
2044 priv->tx_skbuff_dma[priv->cur_tx].len = skb_frag_size(frag);
2045 priv->tx_skbuff[priv->cur_tx] = NULL;
2046 priv->tx_skbuff_dma[priv->cur_tx].map_as_page = true;
2047 }
2048
2049 priv->tx_skbuff_dma[priv->cur_tx].last_segment = true;
2050
2051 priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
2052
2053 if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
2054 if (netif_msg_hw(priv))
2055 pr_debug("%s: stop transmitted packets\n", __func__);
2056 netif_stop_queue(dev);
2057 }
2058
2059 dev->stats.tx_bytes += skb->len;
2060 priv->xstats.tx_tso_frames++;
2061 priv->xstats.tx_tso_nfrags += nfrags;
2062
2063 /* Manage tx mitigation */
2064 priv->tx_count_frames += nfrags + 1;
2065 if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2066 mod_timer(&priv->txtimer,
2067 STMMAC_COAL_TIMER(priv->tx_coal_timer));
2068 } else {
2069 priv->tx_count_frames = 0;
2070 priv->hw->desc->set_tx_ic(desc);
2071 priv->xstats.tx_set_ic_bit++;
2072 }
2073
2074 if (!priv->hwts_tx_en)
2075 skb_tx_timestamp(skb);
2076
2077 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2078 priv->hwts_tx_en)) {
2079 /* declare that device is doing timestamping */
2080 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2081 priv->hw->desc->enable_tx_timestamp(first);
2082 }
2083
2084 /* Complete the first descriptor before granting the DMA */
2085 priv->hw->desc->prepare_tso_tx_desc(first, 1,
2086 proto_hdr_len,
2087 pay_len,
2088 1, priv->tx_skbuff_dma[first_entry].last_segment,
2089 tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2090
2091 /* If context desc is used to change MSS */
2092 if (mss_desc)
2093 priv->hw->desc->set_tx_owner(mss_desc);
2094
2095 /* The own bit must be the latest setting done when prepare the
2096 * descriptor and then barrier is needed to make sure that
2097 * all is coherent before granting the DMA engine.
2098 */
2099 smp_wmb();
2100
2101 if (netif_msg_pktdata(priv)) {
2102 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2103 __func__, priv->cur_tx, priv->dirty_tx, first_entry,
2104 priv->cur_tx, first, nfrags);
2105
2106 priv->hw->desc->display_ring((void *)priv->dma_tx, DMA_TX_SIZE,
2107 0);
2108
2109 pr_info(">>> frame to be transmitted: ");
2110 print_pkt(skb->data, skb_headlen(skb));
2111 }
2112
2113 netdev_sent_queue(dev, skb->len);
2114
2115 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
2116 STMMAC_CHAN0);
2117
2118 spin_unlock(&priv->tx_lock);
2119 return NETDEV_TX_OK;
2120
2121dma_map_err:
2122 spin_unlock(&priv->tx_lock);
2123 dev_err(priv->device, "Tx dma map failed\n");
2124 dev_kfree_skb(skb);
2125 priv->dev->stats.tx_dropped++;
2126 return NETDEV_TX_OK;
2127}
2128
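
Before queuing a TSO frame, stmmac_tso_xmit() checks that the ring has room for roughly one descriptor per TSO_MAX_BUFF_SIZE of payload, plus one, and emits an extra context descriptor only when the MSS changes. A quick sketch of that availability estimate, mirroring the expression used in the hunk above with made-up frame and header sizes:

#include <stdio.h>

#define TSO_MAX_BUFF_SIZE ((16 * 1024) - 1)

/* rough number of descriptors needed for the linear part of a TSO skb */
static unsigned int tso_desc_needed(unsigned int skb_len,
				    unsigned int proto_hdr_len)
{
	return (skb_len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1;
}

int main(void)
{
	/* e.g. a 64 KB GSO frame with 66 bytes of Ethernet/IP/TCP headers */
	printf("descriptors needed: %u\n", tso_desc_needed(65536 + 66, 66));
	return 0;
}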
2129/**
1945 * stmmac_xmit - Tx entry point of the driver 2130 * stmmac_xmit - Tx entry point of the driver
1946 * @skb : the socket buffer 2131 * @skb : the socket buffer
1947 * @dev : device pointer 2132 * @dev : device pointer
@@ -1958,6 +2143,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1958 unsigned int entry, first_entry; 2143 unsigned int entry, first_entry;
1959 struct dma_desc *desc, *first; 2144 struct dma_desc *desc, *first;
1960 unsigned int enh_desc; 2145 unsigned int enh_desc;
2146 unsigned int des;
2147
2148 /* Manage oversized TCP frames for GMAC4 device */
2149 if (skb_is_gso(skb) && priv->tso) {
2150 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2151 return stmmac_tso_xmit(skb, dev);
2152 }
1961 2153
1962 spin_lock(&priv->tx_lock); 2154 spin_lock(&priv->tx_lock);
1963 2155
@@ -1993,7 +2185,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1993 if (enh_desc) 2185 if (enh_desc)
1994 is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc); 2186 is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
1995 2187
1996 if (unlikely(is_jumbo)) { 2188 if (unlikely(is_jumbo) && likely(priv->synopsys_id <
2189 DWMAC_CORE_4_00)) {
1997 entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion); 2190 entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
1998 if (unlikely(entry < 0)) 2191 if (unlikely(entry < 0))
1999 goto dma_map_err; 2192 goto dma_map_err;
@@ -2011,13 +2204,21 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2011 else 2204 else
2012 desc = priv->dma_tx + entry; 2205 desc = priv->dma_tx + entry;
2013 2206
2014 desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len, 2207 des = skb_frag_dma_map(priv->device, frag, 0, len,
2015 DMA_TO_DEVICE); 2208 DMA_TO_DEVICE);
2016 if (dma_mapping_error(priv->device, desc->des2)) 2209 if (dma_mapping_error(priv->device, des))
2017 goto dma_map_err; /* should reuse desc w/o issues */ 2210 goto dma_map_err; /* should reuse desc w/o issues */
2018 2211
2019 priv->tx_skbuff[entry] = NULL; 2212 priv->tx_skbuff[entry] = NULL;
2020 priv->tx_skbuff_dma[entry].buf = desc->des2; 2213
2214 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
2215 desc->des0 = des;
2216 priv->tx_skbuff_dma[entry].buf = desc->des0;
2217 } else {
2218 desc->des2 = des;
2219 priv->tx_skbuff_dma[entry].buf = desc->des2;
2220 }
2221
2021 priv->tx_skbuff_dma[entry].map_as_page = true; 2222 priv->tx_skbuff_dma[entry].map_as_page = true;
2022 priv->tx_skbuff_dma[entry].len = len; 2223 priv->tx_skbuff_dma[entry].len = len;
2023 priv->tx_skbuff_dma[entry].last_segment = last_segment; 2224 priv->tx_skbuff_dma[entry].last_segment = last_segment;
@@ -2032,16 +2233,18 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2032 priv->cur_tx = entry; 2233 priv->cur_tx = entry;
2033 2234
2034 if (netif_msg_pktdata(priv)) { 2235 if (netif_msg_pktdata(priv)) {
2236 void *tx_head;
2237
2035 pr_debug("%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d", 2238 pr_debug("%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
2036 __func__, priv->cur_tx, priv->dirty_tx, first_entry, 2239 __func__, priv->cur_tx, priv->dirty_tx, first_entry,
2037 entry, first, nfrags); 2240 entry, first, nfrags);
2038 2241
2039 if (priv->extend_desc) 2242 if (priv->extend_desc)
2040 stmmac_display_ring((void *)priv->dma_etx, 2243 tx_head = (void *)priv->dma_etx;
2041 DMA_TX_SIZE, 1);
2042 else 2244 else
2043 stmmac_display_ring((void *)priv->dma_tx, 2245 tx_head = (void *)priv->dma_tx;
2044 DMA_TX_SIZE, 0); 2246
2247 priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
2045 2248
2046 pr_debug(">>> frame to be transmitted: "); 2249 pr_debug(">>> frame to be transmitted: ");
2047 print_pkt(skb->data, skb->len); 2250 print_pkt(skb->data, skb->len);
@@ -2080,12 +2283,19 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2080 if (likely(!is_jumbo)) { 2283 if (likely(!is_jumbo)) {
2081 bool last_segment = (nfrags == 0); 2284 bool last_segment = (nfrags == 0);
2082 2285
2083 first->des2 = dma_map_single(priv->device, skb->data, 2286 des = dma_map_single(priv->device, skb->data,
2084 nopaged_len, DMA_TO_DEVICE); 2287 nopaged_len, DMA_TO_DEVICE);
2085 if (dma_mapping_error(priv->device, first->des2)) 2288 if (dma_mapping_error(priv->device, des))
2086 goto dma_map_err; 2289 goto dma_map_err;
2087 2290
2088 priv->tx_skbuff_dma[first_entry].buf = first->des2; 2291 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
2292 first->des0 = des;
2293 priv->tx_skbuff_dma[first_entry].buf = first->des0;
2294 } else {
2295 first->des2 = des;
2296 priv->tx_skbuff_dma[first_entry].buf = first->des2;
2297 }
2298
2089 priv->tx_skbuff_dma[first_entry].len = nopaged_len; 2299 priv->tx_skbuff_dma[first_entry].len = nopaged_len;
2090 priv->tx_skbuff_dma[first_entry].last_segment = last_segment; 2300 priv->tx_skbuff_dma[first_entry].last_segment = last_segment;
2091 2301
@@ -2109,7 +2319,12 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2109 } 2319 }
2110 2320
2111 netdev_sent_queue(dev, skb->len); 2321 netdev_sent_queue(dev, skb->len);
2112 priv->hw->dma->enable_dma_transmission(priv->ioaddr); 2322
2323 if (priv->synopsys_id < DWMAC_CORE_4_00)
2324 priv->hw->dma->enable_dma_transmission(priv->ioaddr);
2325 else
2326 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
2327 STMMAC_CHAN0);
2113 2328
2114 spin_unlock(&priv->tx_lock); 2329 spin_unlock(&priv->tx_lock);
2115 return NETDEV_TX_OK; 2330 return NETDEV_TX_OK;
@@ -2191,9 +2406,15 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
2191 dev_kfree_skb(skb); 2406 dev_kfree_skb(skb);
2192 break; 2407 break;
2193 } 2408 }
2194 p->des2 = priv->rx_skbuff_dma[entry];
2195 2409
2196 priv->hw->mode->refill_desc3(priv, p); 2410 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
2411 p->des0 = priv->rx_skbuff_dma[entry];
2412 p->des1 = 0;
2413 } else {
2414 p->des2 = priv->rx_skbuff_dma[entry];
2415 }
2416 if (priv->hw->mode->refill_desc3)
2417 priv->hw->mode->refill_desc3(priv, p);
2197 2418
2198 if (priv->rx_zeroc_thresh > 0) 2419 if (priv->rx_zeroc_thresh > 0)
2199 priv->rx_zeroc_thresh--; 2420 priv->rx_zeroc_thresh--;
@@ -2201,9 +2422,13 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
2201 if (netif_msg_rx_status(priv)) 2422 if (netif_msg_rx_status(priv))
2202 pr_debug("\trefill entry #%d\n", entry); 2423 pr_debug("\trefill entry #%d\n", entry);
2203 } 2424 }
2204
2205 wmb(); 2425 wmb();
2206 priv->hw->desc->set_rx_owner(p); 2426
2427 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2428 priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
2429 else
2430 priv->hw->desc->set_rx_owner(p);
2431
2207 wmb(); 2432 wmb();
2208 2433
2209 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE); 2434 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
@@ -2226,13 +2451,15 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
2226 int coe = priv->hw->rx_csum; 2451 int coe = priv->hw->rx_csum;
2227 2452
2228 if (netif_msg_rx_status(priv)) { 2453 if (netif_msg_rx_status(priv)) {
2454 void *rx_head;
2455
2229 pr_debug("%s: descriptor ring:\n", __func__); 2456 pr_debug("%s: descriptor ring:\n", __func__);
2230 if (priv->extend_desc) 2457 if (priv->extend_desc)
2231 stmmac_display_ring((void *)priv->dma_erx, 2458 rx_head = (void *)priv->dma_erx;
2232 DMA_RX_SIZE, 1);
2233 else 2459 else
2234 stmmac_display_ring((void *)priv->dma_rx, 2460 rx_head = (void *)priv->dma_rx;
2235 DMA_RX_SIZE, 0); 2461
2462 priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
2236 } 2463 }
2237 while (count < limit) { 2464 while (count < limit) {
2238 int status; 2465 int status;
@@ -2282,11 +2509,23 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
2282 } else { 2509 } else {
2283 struct sk_buff *skb; 2510 struct sk_buff *skb;
2284 int frame_len; 2511 int frame_len;
2512 unsigned int des;
2513
2514 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2515 des = p->des0;
2516 else
2517 des = p->des2;
2285 2518
2286 frame_len = priv->hw->desc->get_rx_frame_len(p, coe); 2519 frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
2287 2520
2288 /* check if frame_len fits the preallocated memory */ 2521 /* If frame length is greater than skb buffer size
2522 * (preallocated during init) then the packet is
2523 * ignored
2524 */
2289 if (frame_len > priv->dma_buf_sz) { 2525 if (frame_len > priv->dma_buf_sz) {
2526 pr_err("%s: len %d larger than size (%d)\n",
2527 priv->dev->name, frame_len,
2528 priv->dma_buf_sz);
2290 priv->dev->stats.rx_length_errors++; 2529 priv->dev->stats.rx_length_errors++;
2291 break; 2530 break;
2292 } 2531 }
@@ -2299,14 +2538,19 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
2299 2538
2300 if (netif_msg_rx_status(priv)) { 2539 if (netif_msg_rx_status(priv)) {
2301 pr_debug("\tdesc: %p [entry %d] buff=0x%x\n", 2540 pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
2302 p, entry, p->des2); 2541 p, entry, des);
2303 if (frame_len > ETH_FRAME_LEN) 2542 if (frame_len > ETH_FRAME_LEN)
2304 pr_debug("\tframe size %d, COE: %d\n", 2543 pr_debug("\tframe size %d, COE: %d\n",
2305 frame_len, status); 2544 frame_len, status);
2306 } 2545 }
2307 2546
2308 if (unlikely((frame_len < priv->rx_copybreak) || 2547 /* Zero-copy is always used for all sizes
2309 stmmac_rx_threshold_count(priv))) { 2548 * in case of GMAC4 because the used descriptors
 2549 * always need to be refilled.
 2550 */
2551 if (unlikely(!priv->plat->has_gmac4 &&
2552 ((frame_len < priv->rx_copybreak) ||
2553 stmmac_rx_threshold_count(priv)))) {
2310 skb = netdev_alloc_skb_ip_align(priv->dev, 2554 skb = netdev_alloc_skb_ip_align(priv->dev,
2311 frame_len); 2555 frame_len);
2312 if (unlikely(!skb)) { 2556 if (unlikely(!skb)) {
@@ -2458,7 +2702,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
2458 return -EBUSY; 2702 return -EBUSY;
2459 } 2703 }
2460 2704
2461 if (priv->plat->enh_desc) 2705 if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
2462 max_mtu = JUMBO_LEN; 2706 max_mtu = JUMBO_LEN;
2463 else 2707 else
2464 max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); 2708 max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
@@ -2472,6 +2716,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
2472 } 2716 }
2473 2717
2474 dev->mtu = new_mtu; 2718 dev->mtu = new_mtu;
2719
2475 netdev_update_features(dev); 2720 netdev_update_features(dev);
2476 2721
2477 return 0; 2722 return 0;
@@ -2496,6 +2741,14 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev,
2496 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN)) 2741 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
2497 features &= ~NETIF_F_CSUM_MASK; 2742 features &= ~NETIF_F_CSUM_MASK;
2498 2743
2744 /* Disable tso if asked by ethtool */
2745 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
2746 if (features & NETIF_F_TSO)
2747 priv->tso = true;
2748 else
2749 priv->tso = false;
2750 }
2751
2499 return features; 2752 return features;
2500} 2753}
2501 2754
@@ -2542,7 +2795,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
2542 } 2795 }
2543 2796
2544 /* To handle GMAC own interrupts */ 2797 /* To handle GMAC own interrupts */
2545 if (priv->plat->has_gmac) { 2798 if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
2546 int status = priv->hw->mac->host_irq_status(priv->hw, 2799 int status = priv->hw->mac->host_irq_status(priv->hw,
2547 &priv->xstats); 2800 &priv->xstats);
2548 if (unlikely(status)) { 2801 if (unlikely(status)) {
@@ -2551,6 +2804,10 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
2551 priv->tx_path_in_lpi_mode = true; 2804 priv->tx_path_in_lpi_mode = true;
2552 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE) 2805 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
2553 priv->tx_path_in_lpi_mode = false; 2806 priv->tx_path_in_lpi_mode = false;
2807 if (status & CORE_IRQ_MTL_RX_OVERFLOW)
2808 priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
2809 priv->rx_tail_addr,
2810 STMMAC_CHAN0);
2554 } 2811 }
2555 } 2812 }
2556 2813
@@ -2623,15 +2880,14 @@ static void sysfs_display_ring(void *head, int size, int extend_desc,
2623 x = *(u64 *) ep; 2880 x = *(u64 *) ep;
2624 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", 2881 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
2625 i, (unsigned int)virt_to_phys(ep), 2882 i, (unsigned int)virt_to_phys(ep),
2626 (unsigned int)x, (unsigned int)(x >> 32), 2883 ep->basic.des0, ep->basic.des1,
2627 ep->basic.des2, ep->basic.des3); 2884 ep->basic.des2, ep->basic.des3);
2628 ep++; 2885 ep++;
2629 } else { 2886 } else {
2630 x = *(u64 *) p; 2887 x = *(u64 *) p;
2631 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", 2888 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
2632 i, (unsigned int)virt_to_phys(ep), 2889 i, (unsigned int)virt_to_phys(ep),
2633 (unsigned int)x, (unsigned int)(x >> 32), 2890 p->des0, p->des1, p->des2, p->des3);
2634 p->des2, p->des3);
2635 p++; 2891 p++;
2636 } 2892 }
2637 seq_printf(seq, "\n"); 2893 seq_printf(seq, "\n");
@@ -2714,10 +2970,15 @@ static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
2714 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N"); 2970 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
2715 seq_printf(seq, "\tChecksum Offload in TX: %s\n", 2971 seq_printf(seq, "\tChecksum Offload in TX: %s\n",
2716 (priv->dma_cap.tx_coe) ? "Y" : "N"); 2972 (priv->dma_cap.tx_coe) ? "Y" : "N");
2717 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n", 2973 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2718 (priv->dma_cap.rx_coe_type1) ? "Y" : "N"); 2974 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
2719 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n", 2975 (priv->dma_cap.rx_coe) ? "Y" : "N");
2720 (priv->dma_cap.rx_coe_type2) ? "Y" : "N"); 2976 } else {
2977 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
2978 (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
2979 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
2980 (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
2981 }
2721 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n", 2982 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
2722 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N"); 2983 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
2723 seq_printf(seq, "\tNumber of Additional RX channel: %d\n", 2984 seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
@@ -2826,27 +3087,35 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
2826 priv->dev->priv_flags |= IFF_UNICAST_FLT; 3087 priv->dev->priv_flags |= IFF_UNICAST_FLT;
2827 mac = dwmac1000_setup(priv->ioaddr, 3088 mac = dwmac1000_setup(priv->ioaddr,
2828 priv->plat->multicast_filter_bins, 3089 priv->plat->multicast_filter_bins,
2829 priv->plat->unicast_filter_entries); 3090 priv->plat->unicast_filter_entries,
3091 &priv->synopsys_id);
3092 } else if (priv->plat->has_gmac4) {
3093 priv->dev->priv_flags |= IFF_UNICAST_FLT;
3094 mac = dwmac4_setup(priv->ioaddr,
3095 priv->plat->multicast_filter_bins,
3096 priv->plat->unicast_filter_entries,
3097 &priv->synopsys_id);
2830 } else { 3098 } else {
2831 mac = dwmac100_setup(priv->ioaddr); 3099 mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
2832 } 3100 }
2833 if (!mac) 3101 if (!mac)
2834 return -ENOMEM; 3102 return -ENOMEM;
2835 3103
2836 priv->hw = mac; 3104 priv->hw = mac;
2837 3105
2838 /* Get and dump the chip ID */
2839 priv->synopsys_id = stmmac_get_synopsys_id(priv);
2840
2841 /* To use the chained or ring mode */ 3106 /* To use the chained or ring mode */
2842 if (chain_mode) { 3107 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2843 priv->hw->mode = &chain_mode_ops; 3108 priv->hw->mode = &dwmac4_ring_mode_ops;
2844 pr_info(" Chain mode enabled\n");
2845 priv->mode = STMMAC_CHAIN_MODE;
2846 } else { 3109 } else {
2847 priv->hw->mode = &ring_mode_ops; 3110 if (chain_mode) {
2848 pr_info(" Ring mode enabled\n"); 3111 priv->hw->mode = &chain_mode_ops;
2849 priv->mode = STMMAC_RING_MODE; 3112 pr_info(" Chain mode enabled\n");
3113 priv->mode = STMMAC_CHAIN_MODE;
3114 } else {
3115 priv->hw->mode = &ring_mode_ops;
3116 pr_info(" Ring mode enabled\n");
3117 priv->mode = STMMAC_RING_MODE;
3118 }
2850 } 3119 }
2851 3120
2852 /* Get the HW capability (new GMAC newer than 3.50a) */ 3121 /* Get the HW capability (new GMAC newer than 3.50a) */
@@ -2862,11 +3131,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
2862 priv->plat->enh_desc = priv->dma_cap.enh_desc; 3131 priv->plat->enh_desc = priv->dma_cap.enh_desc;
2863 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up; 3132 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
2864 3133
2865 /* TXCOE doesn't work in thresh DMA mode */ 3134 priv->plat->tx_coe = priv->dma_cap.tx_coe;
2866 if (priv->plat->force_thresh_dma_mode) 3135 /* In case of GMAC4 rx_coe is from HW cap register. */
2867 priv->plat->tx_coe = 0; 3136 priv->plat->rx_coe = priv->dma_cap.rx_coe;
2868 else
2869 priv->plat->tx_coe = priv->dma_cap.tx_coe;
2870 3137
2871 if (priv->dma_cap.rx_coe_type2) 3138 if (priv->dma_cap.rx_coe_type2)
2872 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2; 3139 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
@@ -2876,13 +3143,17 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
2876 } else 3143 } else
2877 pr_info(" No HW DMA feature register supported"); 3144 pr_info(" No HW DMA feature register supported");
2878 3145
2879 /* To use alternate (extended) or normal descriptor structures */ 3146 /* To use alternate (extended), normal or GMAC4 descriptor structures */
2880 stmmac_selec_desc_mode(priv); 3147 if (priv->synopsys_id >= DWMAC_CORE_4_00)
3148 priv->hw->desc = &dwmac4_desc_ops;
3149 else
3150 stmmac_selec_desc_mode(priv);
2881 3151
2882 if (priv->plat->rx_coe) { 3152 if (priv->plat->rx_coe) {
2883 priv->hw->rx_csum = priv->plat->rx_coe; 3153 priv->hw->rx_csum = priv->plat->rx_coe;
2884 pr_info(" RX Checksum Offload Engine supported (type %d)\n", 3154 pr_info(" RX Checksum Offload Engine supported\n");
2885 priv->plat->rx_coe); 3155 if (priv->synopsys_id < DWMAC_CORE_4_00)
3156 pr_info("\tCOE Type %d\n", priv->hw->rx_csum);
2886 } 3157 }
2887 if (priv->plat->tx_coe) 3158 if (priv->plat->tx_coe)
2888 pr_info(" TX Checksum insertion supported\n"); 3159 pr_info(" TX Checksum insertion supported\n");
@@ -2892,6 +3163,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
2892 device_set_wakeup_capable(priv->device, 1); 3163 device_set_wakeup_capable(priv->device, 1);
2893 } 3164 }
2894 3165
3166 if (priv->dma_cap.tsoen)
3167 pr_info(" TSO supported\n");
3168
2895 return 0; 3169 return 0;
2896} 3170}
2897 3171
@@ -2995,6 +3269,12 @@ int stmmac_dvr_probe(struct device *device,
2995 3269
2996 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 3270 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2997 NETIF_F_RXCSUM; 3271 NETIF_F_RXCSUM;
3272
3273 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3274 ndev->hw_features |= NETIF_F_TSO;
3275 priv->tso = true;
3276 pr_info(" TSO feature enabled\n");
3277 }
2998 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; 3278 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
2999 ndev->watchdog_timeo = msecs_to_jiffies(watchdog); 3279 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
3000#ifdef STMMAC_VLAN_TAG_USED 3280#ifdef STMMAC_VLAN_TAG_USED
@@ -3189,6 +3469,11 @@ int stmmac_resume(struct net_device *ndev)
3189 priv->dirty_rx = 0; 3469 priv->dirty_rx = 0;
3190 priv->dirty_tx = 0; 3470 priv->dirty_tx = 0;
3191 priv->cur_tx = 0; 3471 priv->cur_tx = 0;
3472 /* reset private mss value to force mss context settings at
3473 * next tso xmit (only used for gmac4).
3474 */
3475 priv->mss = 0;
3476
3192 stmmac_clear_descriptors(priv); 3477 stmmac_clear_descriptors(priv);
3193 3478
3194 stmmac_hw_setup(ndev, false); 3479 stmmac_hw_setup(ndev, false);
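The TX and RX hunks above all repeat one selection: from DWMAC core 4.00 onwards the DMA buffer address is written to des0 of the descriptor (with des1 cleared on the RX side), while older cores keep it in des2; the same core check later picks set_tx_tail_ptr() over enable_dma_transmission() to kick the DMA. A minimal sketch of that selection, reusing the identifiers from the hunks above (the helper name itself is hypothetical, not part of the patch):

static inline void stmmac_set_desc_buf_addr(struct stmmac_priv *priv,
					    struct dma_desc *p, unsigned int des)
{
	if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
		p->des0 = des;	/* GMAC4 descriptors carry the buffer address in des0 */
	else
		p->des2 = des;	/* pre-4.00 cores keep it in des2 */
}

tx_skbuff_dma[entry].buf then records whichever field was actually written, so the unmap path frees the address the hardware really used.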
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index ea76129dafc2..06704ca6f9ca 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -198,20 +198,12 @@ int stmmac_mdio_register(struct net_device *ndev)
198 struct mii_bus *new_bus; 198 struct mii_bus *new_bus;
199 struct stmmac_priv *priv = netdev_priv(ndev); 199 struct stmmac_priv *priv = netdev_priv(ndev);
200 struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data; 200 struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
201 int addr, found;
202 struct device_node *mdio_node = priv->plat->mdio_node; 201 struct device_node *mdio_node = priv->plat->mdio_node;
202 int addr, found;
203 203
204 if (!mdio_bus_data) 204 if (!mdio_bus_data)
205 return 0; 205 return 0;
206 206
207 if (IS_ENABLED(CONFIG_OF)) {
208 if (mdio_node) {
209 netdev_dbg(ndev, "FOUND MDIO subnode\n");
210 } else {
211 netdev_warn(ndev, "No MDIO subnode found\n");
212 }
213 }
214
215 new_bus = mdiobus_alloc(); 207 new_bus = mdiobus_alloc();
216 if (new_bus == NULL) 208 if (new_bus == NULL)
217 return -ENOMEM; 209 return -ENOMEM;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index dcbd2a1601e8..effaa4ff5ab7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -132,6 +132,69 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
132} 132}
133 133
134/** 134/**
135 * stmmac_dt_phy - parse device-tree driver parameters to allocate PHY resources
136 * @plat: driver data platform structure
137 * @np: device tree node
138 * @dev: device pointer
139 * Description:
 140 * The MDIO bus will be allocated if a PHY transceiver is found on board;
 141 * it will be NULL if a fixed-link is configured.
 142 * If the "snps,dwmac-mdio" sub-node is present, the MDIO bus is allocated
 143 * in any case (for DSA, MDIO must be registered even with a fixed-link).
 144 * The table below summarizes the supported configurations:
145 * -------------------------------
146 * snps,phy-addr | Y
147 * -------------------------------
148 * phy-handle | Y
149 * -------------------------------
150 * fixed-link | N
151 * -------------------------------
152 * snps,dwmac-mdio |
153 * even if | Y
154 * fixed-link |
155 * -------------------------------
156 *
 157 * It returns 0 on success, -ENODEV otherwise.
158 */
159static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
160 struct device_node *np, struct device *dev)
161{
162 bool mdio = true;
163
164 /* If phy-handle property is passed from DT, use it as the PHY */
165 plat->phy_node = of_parse_phandle(np, "phy-handle", 0);
166 if (plat->phy_node)
167 dev_dbg(dev, "Found phy-handle subnode\n");
168
169 /* If phy-handle is not specified, check if we have a fixed-phy */
170 if (!plat->phy_node && of_phy_is_fixed_link(np)) {
171 if ((of_phy_register_fixed_link(np) < 0))
172 return -ENODEV;
173
174 dev_dbg(dev, "Found fixed-link subnode\n");
175 plat->phy_node = of_node_get(np);
176 mdio = false;
177 }
178
179 /* If snps,dwmac-mdio is passed from DT, always register the MDIO */
180 for_each_child_of_node(np, plat->mdio_node) {
181 if (of_device_is_compatible(plat->mdio_node, "snps,dwmac-mdio"))
182 break;
183 }
184
185 if (plat->mdio_node) {
186 dev_dbg(dev, "Found MDIO subnode\n");
187 mdio = true;
188 }
189
190 if (mdio)
191 plat->mdio_bus_data =
192 devm_kzalloc(dev, sizeof(struct stmmac_mdio_bus_data),
193 GFP_KERNEL);
194 return 0;
195}
196
197/**
135 * stmmac_probe_config_dt - parse device-tree driver parameters 198 * stmmac_probe_config_dt - parse device-tree driver parameters
136 * @pdev: platform_device structure 199 * @pdev: platform_device structure
137 * @plat: driver data platform structure 200 * @plat: driver data platform structure
@@ -146,7 +209,6 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
146 struct device_node *np = pdev->dev.of_node; 209 struct device_node *np = pdev->dev.of_node;
147 struct plat_stmmacenet_data *plat; 210 struct plat_stmmacenet_data *plat;
148 struct stmmac_dma_cfg *dma_cfg; 211 struct stmmac_dma_cfg *dma_cfg;
149 struct device_node *child_node = NULL;
150 212
151 plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); 213 plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
152 if (!plat) 214 if (!plat)
@@ -166,36 +228,15 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
166 /* Default to phy auto-detection */ 228 /* Default to phy auto-detection */
167 plat->phy_addr = -1; 229 plat->phy_addr = -1;
168 230
169 /* If we find a phy-handle property, use it as the PHY */
170 plat->phy_node = of_parse_phandle(np, "phy-handle", 0);
171
172 /* If phy-handle is not specified, check if we have a fixed-phy */
173 if (!plat->phy_node && of_phy_is_fixed_link(np)) {
174 if ((of_phy_register_fixed_link(np) < 0))
175 return ERR_PTR(-ENODEV);
176
177 plat->phy_node = of_node_get(np);
178 }
179
180 for_each_child_of_node(np, child_node)
181 if (of_device_is_compatible(child_node, "snps,dwmac-mdio")) {
182 plat->mdio_node = child_node;
183 break;
184 }
185
186 /* "snps,phy-addr" is not a standard property. Mark it as deprecated 231 /* "snps,phy-addr" is not a standard property. Mark it as deprecated
187 * and warn of its use. Remove this when phy node support is added. 232 * and warn of its use. Remove this when phy node support is added.
188 */ 233 */
189 if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0) 234 if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
190 dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n"); 235 dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
191 236
192 if ((plat->phy_node && !of_phy_is_fixed_link(np)) || !plat->mdio_node) 237 /* Configure the PHY using all supported device-tree properties */
193 plat->mdio_bus_data = NULL; 238 if (stmmac_dt_phy(plat, np, &pdev->dev))
194 else 239 return ERR_PTR(-ENODEV);
195 plat->mdio_bus_data =
196 devm_kzalloc(&pdev->dev,
197 sizeof(struct stmmac_mdio_bus_data),
198 GFP_KERNEL);
199 240
200 of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size); 241 of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size);
201 242
@@ -243,6 +284,13 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
243 plat->pmt = 1; 284 plat->pmt = 1;
244 } 285 }
245 286
287 if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
288 of_device_is_compatible(np, "snps,dwmac-4.10a")) {
289 plat->has_gmac4 = 1;
290 plat->pmt = 1;
291 plat->tso_en = of_property_read_bool(np, "snps,tso");
292 }
293
246 if (of_device_is_compatible(np, "snps,dwmac-3.610") || 294 if (of_device_is_compatible(np, "snps,dwmac-3.610") ||
247 of_device_is_compatible(np, "snps,dwmac-3.710")) { 295 of_device_is_compatible(np, "snps,dwmac-3.710")) {
248 plat->enh_desc = 1; 296 plat->enh_desc = 1;
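stmmac_dt_phy() above is what decides whether plat->mdio_bus_data gets allocated, and that allocation is the only thing stmmac_mdio_register() checks before building the bus (see the stmmac_mdio.c hunk earlier). A condensed sketch of the outcomes, using the field names from the hunks above (the summary comments are illustrative, not code from the patch):

/* phy-handle or snps,phy-addr    -> phy_node set, mdio_bus_data allocated
 * fixed-link only                -> phy_node = np, mdio_bus_data stays NULL
 * fixed-link + "snps,dwmac-mdio" -> phy_node = np, mdio_bus_data allocated
 */
if (!priv->plat->mdio_bus_data)
	return 0;	/* stmmac_mdio_register(): no MDIO bus is registered */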
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 192631a345df..a9fbf17eb256 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -87,7 +87,6 @@ struct geneve_sock {
87 struct socket *sock; 87 struct socket *sock;
88 struct rcu_head rcu; 88 struct rcu_head rcu;
89 int refcnt; 89 int refcnt;
90 struct udp_offload udp_offloads;
91 struct hlist_head vni_list[VNI_HASH_SIZE]; 90 struct hlist_head vni_list[VNI_HASH_SIZE];
92 u32 flags; 91 u32 flags;
93}; 92};
@@ -409,14 +408,6 @@ static void geneve_notify_add_rx_port(struct geneve_sock *gs)
409 struct net *net = sock_net(sk); 408 struct net *net = sock_net(sk);
410 sa_family_t sa_family = geneve_get_sk_family(gs); 409 sa_family_t sa_family = geneve_get_sk_family(gs);
411 __be16 port = inet_sk(sk)->inet_sport; 410 __be16 port = inet_sk(sk)->inet_sport;
412 int err;
413
414 if (sa_family == AF_INET) {
415 err = udp_add_offload(sock_net(sk), &gs->udp_offloads);
416 if (err)
417 pr_warn("geneve: udp_add_offload failed with status %d\n",
418 err);
419 }
420 411
421 rcu_read_lock(); 412 rcu_read_lock();
422 for_each_netdev_rcu(net, dev) { 413 for_each_netdev_rcu(net, dev) {
@@ -432,9 +423,9 @@ static int geneve_hlen(struct genevehdr *gh)
432 return sizeof(*gh) + gh->opt_len * 4; 423 return sizeof(*gh) + gh->opt_len * 4;
433} 424}
434 425
435static struct sk_buff **geneve_gro_receive(struct sk_buff **head, 426static struct sk_buff **geneve_gro_receive(struct sock *sk,
436 struct sk_buff *skb, 427 struct sk_buff **head,
437 struct udp_offload *uoff) 428 struct sk_buff *skb)
438{ 429{
439 struct sk_buff *p, **pp = NULL; 430 struct sk_buff *p, **pp = NULL;
440 struct genevehdr *gh, *gh2; 431 struct genevehdr *gh, *gh2;
@@ -495,8 +486,8 @@ out:
495 return pp; 486 return pp;
496} 487}
497 488
498static int geneve_gro_complete(struct sk_buff *skb, int nhoff, 489static int geneve_gro_complete(struct sock *sk, struct sk_buff *skb,
499 struct udp_offload *uoff) 490 int nhoff)
500{ 491{
501 struct genevehdr *gh; 492 struct genevehdr *gh;
502 struct packet_offload *ptype; 493 struct packet_offload *ptype;
@@ -545,14 +536,14 @@ static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
545 INIT_HLIST_HEAD(&gs->vni_list[h]); 536 INIT_HLIST_HEAD(&gs->vni_list[h]);
546 537
547 /* Initialize the geneve udp offloads structure */ 538 /* Initialize the geneve udp offloads structure */
548 gs->udp_offloads.port = port;
549 gs->udp_offloads.callbacks.gro_receive = geneve_gro_receive;
550 gs->udp_offloads.callbacks.gro_complete = geneve_gro_complete;
551 geneve_notify_add_rx_port(gs); 539 geneve_notify_add_rx_port(gs);
552 540
553 /* Mark socket as an encapsulation socket */ 541 /* Mark socket as an encapsulation socket */
542 memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
554 tunnel_cfg.sk_user_data = gs; 543 tunnel_cfg.sk_user_data = gs;
555 tunnel_cfg.encap_type = 1; 544 tunnel_cfg.encap_type = 1;
545 tunnel_cfg.gro_receive = geneve_gro_receive;
546 tunnel_cfg.gro_complete = geneve_gro_complete;
556 tunnel_cfg.encap_rcv = geneve_udp_encap_recv; 547 tunnel_cfg.encap_rcv = geneve_udp_encap_recv;
557 tunnel_cfg.encap_destroy = NULL; 548 tunnel_cfg.encap_destroy = NULL;
558 setup_udp_tunnel_sock(net, sock, &tunnel_cfg); 549 setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
@@ -576,9 +567,6 @@ static void geneve_notify_del_rx_port(struct geneve_sock *gs)
576 } 567 }
577 568
578 rcu_read_unlock(); 569 rcu_read_unlock();
579
580 if (sa_family == AF_INET)
581 udp_del_offload(&gs->udp_offloads);
582} 570}
583 571
584static void __geneve_sock_release(struct geneve_sock *gs) 572static void __geneve_sock_release(struct geneve_sock *gs)
@@ -843,8 +831,8 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
843 if (info) { 831 if (info) {
844 fl6->daddr = info->key.u.ipv6.dst; 832 fl6->daddr = info->key.u.ipv6.dst;
845 fl6->saddr = info->key.u.ipv6.src; 833 fl6->saddr = info->key.u.ipv6.src;
846 fl6->flowi6_tos = RT_TOS(info->key.tos); 834 fl6->flowlabel = ip6_make_flowinfo(RT_TOS(info->key.tos),
847 fl6->flowlabel = info->key.label; 835 info->key.label);
848 dst_cache = &info->dst_cache; 836 dst_cache = &info->dst_cache;
849 } else { 837 } else {
850 prio = geneve->tos; 838 prio = geneve->tos;
@@ -855,8 +843,8 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
855 use_cache = false; 843 use_cache = false;
856 } 844 }
857 845
858 fl6->flowi6_tos = RT_TOS(prio); 846 fl6->flowlabel = ip6_make_flowinfo(RT_TOS(prio),
859 fl6->flowlabel = geneve->label; 847 geneve->label);
860 fl6->daddr = geneve->remote.sin6.sin6_addr; 848 fl6->daddr = geneve->remote.sin6.sin6_addr;
861 dst_cache = &geneve->dst_cache; 849 dst_cache = &geneve->dst_cache;
862 } 850 }
@@ -1049,7 +1037,8 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
1049 if (unlikely(err)) 1037 if (unlikely(err))
1050 goto err; 1038 goto err;
1051 1039
1052 prio = ip_tunnel_ecn_encap(fl6.flowi6_tos, iip, skb); 1040 prio = ip_tunnel_ecn_encap(ip6_tclass(fl6.flowlabel),
1041 iip, skb);
1053 ttl = geneve->ttl; 1042 ttl = geneve->ttl;
1054 if (!ttl && ipv6_addr_is_multicast(&fl6.daddr)) 1043 if (!ttl && ipv6_addr_is_multicast(&fl6.daddr))
1055 ttl = 1; 1044 ttl = 1;
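In the IPv6 path above the traffic class no longer travels in fl6->flowi6_tos: it is packed together with the flow label into fl6->flowlabel via ip6_make_flowinfo(), and ip6_tclass() unpacks it again before the ECN encapsulation. A minimal sketch of that round trip, assuming fl6, info, iip and skb are set up as in the hunks above:

/* pack DSCP/ECN bits and the flow label into one flowinfo word */
fl6->flowlabel = ip6_make_flowinfo(RT_TOS(info->key.tos), info->key.label);

/* later, recover only the traffic class for ECN encapsulation */
prio = ip_tunnel_ecn_encap(ip6_tclass(fl6->flowlabel), iip, skb);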
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index b4c68783dfc3..8b3bd8ecd1c4 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -619,6 +619,7 @@ struct nvsp_message {
619#define NETVSC_PACKET_SIZE 4096 619#define NETVSC_PACKET_SIZE 4096
620 620
621#define VRSS_SEND_TAB_SIZE 16 621#define VRSS_SEND_TAB_SIZE 16
622#define VRSS_CHANNEL_MAX 64
622 623
623#define RNDIS_MAX_PKT_DEFAULT 8 624#define RNDIS_MAX_PKT_DEFAULT 8
624#define RNDIS_PKT_ALIGN_DEFAULT 8 625#define RNDIS_PKT_ALIGN_DEFAULT 8
@@ -700,13 +701,13 @@ struct netvsc_device {
700 701
701 struct net_device *ndev; 702 struct net_device *ndev;
702 703
703 struct vmbus_channel *chn_table[NR_CPUS]; 704 struct vmbus_channel *chn_table[VRSS_CHANNEL_MAX];
704 u32 send_table[VRSS_SEND_TAB_SIZE]; 705 u32 send_table[VRSS_SEND_TAB_SIZE];
705 u32 max_chn; 706 u32 max_chn;
706 u32 num_chn; 707 u32 num_chn;
707 spinlock_t sc_lock; /* Protects num_sc_offered variable */ 708 spinlock_t sc_lock; /* Protects num_sc_offered variable */
708 u32 num_sc_offered; 709 u32 num_sc_offered;
709 atomic_t queue_sends[NR_CPUS]; 710 atomic_t queue_sends[VRSS_CHANNEL_MAX];
710 711
711 /* Holds rndis device info */ 712 /* Holds rndis device info */
712 void *extension; 713 void *extension;
@@ -718,7 +719,7 @@ struct netvsc_device {
718 /* The sub channel callback buffer */ 719 /* The sub channel callback buffer */
719 unsigned char *sub_cb_buf; 720 unsigned char *sub_cb_buf;
720 721
721 struct multi_send_data msd[NR_CPUS]; 722 struct multi_send_data msd[VRSS_CHANNEL_MAX];
722 u32 max_pkt; /* max number of pkt in one send, e.g. 8 */ 723 u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
723 u32 pkt_align; /* alignment bytes, e.g. 8 */ 724 u32 pkt_align; /* alignment bytes, e.g. 8 */
724 725
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 08608499fa17..b8121eba33ff 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -858,6 +858,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
858 struct netvsc_device *nvdev = hv_get_drvdata(hdev); 858 struct netvsc_device *nvdev = hv_get_drvdata(hdev);
859 struct netvsc_device_info device_info; 859 struct netvsc_device_info device_info;
860 int limit = ETH_DATA_LEN; 860 int limit = ETH_DATA_LEN;
861 u32 num_chn;
861 int ret = 0; 862 int ret = 0;
862 863
863 if (nvdev == NULL || nvdev->destroy) 864 if (nvdev == NULL || nvdev->destroy)
@@ -873,6 +874,8 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
873 if (ret) 874 if (ret)
874 goto out; 875 goto out;
875 876
877 num_chn = nvdev->num_chn;
878
876 nvdev->start_remove = true; 879 nvdev->start_remove = true;
877 rndis_filter_device_remove(hdev); 880 rndis_filter_device_remove(hdev);
878 881
@@ -883,7 +886,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
883 886
884 memset(&device_info, 0, sizeof(device_info)); 887 memset(&device_info, 0, sizeof(device_info));
885 device_info.ring_size = ring_size; 888 device_info.ring_size = ring_size;
886 device_info.num_chn = nvdev->num_chn; 889 device_info.num_chn = num_chn;
887 device_info.max_num_vrss_chns = max_num_vrss_chns; 890 device_info.max_num_vrss_chns = max_num_vrss_chns;
888 rndis_filter_device_add(hdev, &device_info); 891 rndis_filter_device_add(hdev, &device_info);
889 892
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 47d07c576a34..c4e1e0408433 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -986,12 +986,6 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
986 986
987 nvscdev = hv_get_drvdata(new_sc->primary_channel->device_obj); 987 nvscdev = hv_get_drvdata(new_sc->primary_channel->device_obj);
988 988
989 spin_lock_irqsave(&nvscdev->sc_lock, flags);
990 nvscdev->num_sc_offered--;
991 spin_unlock_irqrestore(&nvscdev->sc_lock, flags);
992 if (nvscdev->num_sc_offered == 0)
993 complete(&nvscdev->channel_init_wait);
994
995 if (chn_index >= nvscdev->num_chn) 989 if (chn_index >= nvscdev->num_chn)
996 return; 990 return;
997 991
@@ -1004,6 +998,12 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
1004 998
1005 if (ret == 0) 999 if (ret == 0)
1006 nvscdev->chn_table[chn_index] = new_sc; 1000 nvscdev->chn_table[chn_index] = new_sc;
1001
1002 spin_lock_irqsave(&nvscdev->sc_lock, flags);
1003 nvscdev->num_sc_offered--;
1004 spin_unlock_irqrestore(&nvscdev->sc_lock, flags);
1005 if (nvscdev->num_sc_offered == 0)
1006 complete(&nvscdev->channel_init_wait);
1007} 1007}
1008 1008
1009int rndis_filter_device_add(struct hv_device *dev, 1009int rndis_filter_device_add(struct hv_device *dev,
@@ -1113,9 +1113,9 @@ int rndis_filter_device_add(struct hv_device *dev,
1113 if (ret || rsscap.num_recv_que < 2) 1113 if (ret || rsscap.num_recv_que < 2)
1114 goto out; 1114 goto out;
1115 1115
1116 num_rss_qs = min(device_info->max_num_vrss_chns, rsscap.num_recv_que); 1116 net_device->max_chn = min_t(u32, VRSS_CHANNEL_MAX, rsscap.num_recv_que);
1117 1117
1118 net_device->max_chn = rsscap.num_recv_que; 1118 num_rss_qs = min(device_info->max_num_vrss_chns, net_device->max_chn);
1119 1119
1120 /* 1120 /*
1121 * We will limit the VRSS channels to the number CPUs in the NUMA node 1121 * We will limit the VRSS channels to the number CPUs in the NUMA node
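Since chn_table[], queue_sends[] and msd[] above are now sized by VRSS_CHANNEL_MAX rather than NR_CPUS, the RSS queue count has to be clamped to that bound before it is used as an index; the reordered rndis_filter hunk does that clamping first. A sketch of the resulting order, with the identifiers from the hunks above:

/* cap to the array bound first, then apply the configured VRSS limit */
net_device->max_chn = min_t(u32, VRSS_CHANNEL_MAX, rsscap.num_recv_que);
num_rss_qs = min(device_info->max_num_vrss_chns, net_device->max_chn);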
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index a2c227bfb687..e070e1222733 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -394,12 +394,5 @@ config MCS_FIR
394 To compile it as a module, choose M here: the module will be called 394 To compile it as a module, choose M here: the module will be called
395 mcs7780. 395 mcs7780.
396 396
397config SH_IRDA
398 tristate "SuperH IrDA driver"
399 depends on IRDA
400 depends on (ARCH_SHMOBILE || COMPILE_TEST) && HAS_IOMEM
401 help
402 Say Y here if your want to enable SuperH IrDA devices.
403
404endmenu 397endmenu
405 398
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index be8ab5b9a4a2..4c344433dae5 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -19,7 +19,6 @@ obj-$(CONFIG_VIA_FIR) += via-ircc.o
19obj-$(CONFIG_PXA_FICP) += pxaficp_ir.o 19obj-$(CONFIG_PXA_FICP) += pxaficp_ir.o
20obj-$(CONFIG_MCS_FIR) += mcs7780.o 20obj-$(CONFIG_MCS_FIR) += mcs7780.o
21obj-$(CONFIG_AU1000_FIR) += au1k_ir.o 21obj-$(CONFIG_AU1000_FIR) += au1k_ir.o
22obj-$(CONFIG_SH_IRDA) += sh_irda.o
23# SIR drivers 22# SIR drivers
24obj-$(CONFIG_IRTTY_SIR) += irtty-sir.o sir-dev.o 23obj-$(CONFIG_IRTTY_SIR) += irtty-sir.o sir-dev.o
25obj-$(CONFIG_BFIN_SIR) += bfin_sir.o 24obj-$(CONFIG_BFIN_SIR) += bfin_sir.o
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
deleted file mode 100644
index c96b46b2c3a8..000000000000
--- a/drivers/net/irda/sh_irda.c
+++ /dev/null
@@ -1,875 +0,0 @@
1/*
2 * SuperH IrDA Driver
3 *
4 * Copyright (C) 2010 Renesas Solutions Corp.
5 * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
6 *
7 * Based on sh_sir.c
8 * Copyright (C) 2009 Renesas Solutions Corp.
9 * Copyright 2006-2009 Analog Devices Inc.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15
16/*
17 * CAUTION
18 *
19 * This driver is very simple.
20 * So, it doesn't have below support now
21 * - MIR/FIR support
22 * - DMA transfer support
23 * - FIFO mode support
24 */
25#include <linux/io.h>
26#include <linux/interrupt.h>
27#include <linux/module.h>
28#include <linux/platform_device.h>
29#include <linux/pm_runtime.h>
30#include <linux/clk.h>
31#include <net/irda/wrapper.h>
32#include <net/irda/irda_device.h>
33
34#define DRIVER_NAME "sh_irda"
35
36#define __IRDARAM_LEN 0x1039
37
38#define IRTMR 0x1F00 /* Transfer mode */
39#define IRCFR 0x1F02 /* Configuration */
40#define IRCTR 0x1F04 /* IR control */
41#define IRTFLR 0x1F20 /* Transmit frame length */
42#define IRTCTR 0x1F22 /* Transmit control */
43#define IRRFLR 0x1F40 /* Receive frame length */
44#define IRRCTR 0x1F42 /* Receive control */
45#define SIRISR 0x1F60 /* SIR-UART mode interrupt source */
46#define SIRIMR 0x1F62 /* SIR-UART mode interrupt mask */
47#define SIRICR 0x1F64 /* SIR-UART mode interrupt clear */
48#define SIRBCR 0x1F68 /* SIR-UART mode baud rate count */
49#define MFIRISR 0x1F70 /* MIR/FIR mode interrupt source */
50#define MFIRIMR 0x1F72 /* MIR/FIR mode interrupt mask */
51#define MFIRICR 0x1F74 /* MIR/FIR mode interrupt clear */
52#define CRCCTR 0x1F80 /* CRC engine control */
53#define CRCIR 0x1F86 /* CRC engine input data */
54#define CRCCR 0x1F8A /* CRC engine calculation */
55#define CRCOR 0x1F8E /* CRC engine output data */
56#define FIFOCP 0x1FC0 /* FIFO current pointer */
57#define FIFOFP 0x1FC2 /* FIFO follow pointer */
58#define FIFORSMSK 0x1FC4 /* FIFO receive status mask */
59#define FIFORSOR 0x1FC6 /* FIFO receive status OR */
60#define FIFOSEL 0x1FC8 /* FIFO select */
61#define FIFORS 0x1FCA /* FIFO receive status */
62#define FIFORFL 0x1FCC /* FIFO receive frame length */
63#define FIFORAMCP 0x1FCE /* FIFO RAM current pointer */
64#define FIFORAMFP 0x1FD0 /* FIFO RAM follow pointer */
65#define BIFCTL 0x1FD2 /* BUS interface control */
66#define IRDARAM 0x0000 /* IrDA buffer RAM */
67#define IRDARAM_LEN __IRDARAM_LEN /* - 8/16/32 (read-only for 32) */
68
69/* IRTMR */
70#define TMD_MASK (0x3 << 14) /* Transfer Mode */
71#define TMD_SIR (0x0 << 14)
72#define TMD_MIR (0x3 << 14)
73#define TMD_FIR (0x2 << 14)
74
75#define FIFORIM (1 << 8) /* FIFO receive interrupt mask */
76#define MIM (1 << 4) /* MIR/FIR Interrupt Mask */
77#define SIM (1 << 0) /* SIR Interrupt Mask */
78#define xIM_MASK (FIFORIM | MIM | SIM)
79
80/* IRCFR */
81#define RTO_SHIFT 8 /* shift for Receive Timeout */
82#define RTO (0x3 << RTO_SHIFT)
83
84/* IRTCTR */
85#define ARMOD (1 << 15) /* Auto-Receive Mode */
86#define TE (1 << 0) /* Transmit Enable */
87
88/* IRRFLR */
89#define RFL_MASK (0x1FFF) /* mask for Receive Frame Length */
90
91/* IRRCTR */
92#define RE (1 << 0) /* Receive Enable */
93
94/*
95 * SIRISR, SIRIMR, SIRICR,
96 * MFIRISR, MFIRIMR, MFIRICR
97 */
98#define FRE (1 << 15) /* Frame Receive End */
99#define TROV (1 << 11) /* Transfer Area Overflow */
100#define xIR_9 (1 << 9)
101#define TOT xIR_9 /* for SIR Timeout */
102#define ABTD xIR_9 /* for MIR/FIR Abort Detection */
103#define xIR_8 (1 << 8)
104#define FER xIR_8 /* for SIR Framing Error */
105#define CRCER xIR_8 /* for MIR/FIR CRC error */
106#define FTE (1 << 7) /* Frame Transmit End */
107#define xIR_MASK (FRE | TROV | xIR_9 | xIR_8 | FTE)
108
109/* SIRBCR */
110#define BRC_MASK (0x3F) /* mask for Baud Rate Count */
111
112/* CRCCTR */
113#define CRC_RST (1 << 15) /* CRC Engine Reset */
114#define CRC_CT_MASK 0x0FFF /* mask for CRC Engine Input Data Count */
115
116/* CRCIR */
117#define CRC_IN_MASK 0x0FFF /* mask for CRC Engine Input Data */
118
119/************************************************************************
120
121
122 enum / structure
123
124
125************************************************************************/
126enum sh_irda_mode {
127 SH_IRDA_NONE = 0,
128 SH_IRDA_SIR,
129 SH_IRDA_MIR,
130 SH_IRDA_FIR,
131};
132
133struct sh_irda_self;
134struct sh_irda_xir_func {
135 int (*xir_fre) (struct sh_irda_self *self);
136 int (*xir_trov) (struct sh_irda_self *self);
137 int (*xir_9) (struct sh_irda_self *self);
138 int (*xir_8) (struct sh_irda_self *self);
139 int (*xir_fte) (struct sh_irda_self *self);
140};
141
142struct sh_irda_self {
143 void __iomem *membase;
144 unsigned int irq;
145 struct platform_device *pdev;
146
147 struct net_device *ndev;
148
149 struct irlap_cb *irlap;
150 struct qos_info qos;
151
152 iobuff_t tx_buff;
153 iobuff_t rx_buff;
154
155 enum sh_irda_mode mode;
156 spinlock_t lock;
157
158 struct sh_irda_xir_func *xir_func;
159};
160
161/************************************************************************
162
163
164 common function
165
166
167************************************************************************/
168static void sh_irda_write(struct sh_irda_self *self, u32 offset, u16 data)
169{
170 unsigned long flags;
171
172 spin_lock_irqsave(&self->lock, flags);
173 iowrite16(data, self->membase + offset);
174 spin_unlock_irqrestore(&self->lock, flags);
175}
176
177static u16 sh_irda_read(struct sh_irda_self *self, u32 offset)
178{
179 unsigned long flags;
180 u16 ret;
181
182 spin_lock_irqsave(&self->lock, flags);
183 ret = ioread16(self->membase + offset);
184 spin_unlock_irqrestore(&self->lock, flags);
185
186 return ret;
187}
188
189static void sh_irda_update_bits(struct sh_irda_self *self, u32 offset,
190 u16 mask, u16 data)
191{
192 unsigned long flags;
193 u16 old, new;
194
195 spin_lock_irqsave(&self->lock, flags);
196 old = ioread16(self->membase + offset);
197 new = (old & ~mask) | data;
198 if (old != new)
199 iowrite16(data, self->membase + offset);
200 spin_unlock_irqrestore(&self->lock, flags);
201}
202
203/************************************************************************
204
205
206 mode function
207
208
209************************************************************************/
210/*=====================================
211 *
212 * common
213 *
214 *=====================================*/
215static void sh_irda_rcv_ctrl(struct sh_irda_self *self, int enable)
216{
217 struct device *dev = &self->ndev->dev;
218
219 sh_irda_update_bits(self, IRRCTR, RE, enable ? RE : 0);
220 dev_dbg(dev, "recv %s\n", enable ? "enable" : "disable");
221}
222
223static int sh_irda_set_timeout(struct sh_irda_self *self, int interval)
224{
225 struct device *dev = &self->ndev->dev;
226
227 if (SH_IRDA_SIR != self->mode)
228 interval = 0;
229
230 if (interval < 0 || interval > 2) {
231 dev_err(dev, "unsupported timeout interval\n");
232 return -EINVAL;
233 }
234
235 sh_irda_update_bits(self, IRCFR, RTO, interval << RTO_SHIFT);
236 return 0;
237}
238
239static int sh_irda_set_baudrate(struct sh_irda_self *self, int baudrate)
240{
241 struct device *dev = &self->ndev->dev;
242 u16 val;
243
244 if (baudrate < 0)
245 return 0;
246
247 if (SH_IRDA_SIR != self->mode) {
248 dev_err(dev, "it is not SIR mode\n");
249 return -EINVAL;
250 }
251
252 /*
253 * Baud rate (bits/s) =
254 * (48 MHz / 26) / (baud rate counter value + 1) x 16
255 */
256 val = (48000000 / 26 / 16 / baudrate) - 1;
257 dev_dbg(dev, "baudrate = %d, val = 0x%02x\n", baudrate, val);
258
259 sh_irda_update_bits(self, SIRBCR, BRC_MASK, val);
260
261 return 0;
262}
263
264static int sh_irda_get_rcv_length(struct sh_irda_self *self)
265{
266 return RFL_MASK & sh_irda_read(self, IRRFLR);
267}
268
269/*=====================================
270 *
271 * NONE MODE
272 *
273 *=====================================*/
274static int sh_irda_xir_fre(struct sh_irda_self *self)
275{
276 struct device *dev = &self->ndev->dev;
277 dev_err(dev, "none mode: frame recv\n");
278 return 0;
279}
280
281static int sh_irda_xir_trov(struct sh_irda_self *self)
282{
283 struct device *dev = &self->ndev->dev;
284 dev_err(dev, "none mode: buffer ram over\n");
285 return 0;
286}
287
288static int sh_irda_xir_9(struct sh_irda_self *self)
289{
290 struct device *dev = &self->ndev->dev;
291 dev_err(dev, "none mode: time over\n");
292 return 0;
293}
294
295static int sh_irda_xir_8(struct sh_irda_self *self)
296{
297 struct device *dev = &self->ndev->dev;
298 dev_err(dev, "none mode: framing error\n");
299 return 0;
300}
301
302static int sh_irda_xir_fte(struct sh_irda_self *self)
303{
304 struct device *dev = &self->ndev->dev;
305 dev_err(dev, "none mode: frame transmit end\n");
306 return 0;
307}
308
309static struct sh_irda_xir_func sh_irda_xir_func = {
310 .xir_fre = sh_irda_xir_fre,
311 .xir_trov = sh_irda_xir_trov,
312 .xir_9 = sh_irda_xir_9,
313 .xir_8 = sh_irda_xir_8,
314 .xir_fte = sh_irda_xir_fte,
315};
316
317/*=====================================
318 *
319 * MIR/FIR MODE
320 *
321 * MIR/FIR are not supported now
322 *=====================================*/
323static struct sh_irda_xir_func sh_irda_mfir_func = {
324 .xir_fre = sh_irda_xir_fre,
325 .xir_trov = sh_irda_xir_trov,
326 .xir_9 = sh_irda_xir_9,
327 .xir_8 = sh_irda_xir_8,
328 .xir_fte = sh_irda_xir_fte,
329};
330
331/*=====================================
332 *
333 * SIR MODE
334 *
335 *=====================================*/
336static int sh_irda_sir_fre(struct sh_irda_self *self)
337{
338 struct device *dev = &self->ndev->dev;
339 u16 data16;
340 u8 *data = (u8 *)&data16;
341 int len = sh_irda_get_rcv_length(self);
342 int i, j;
343
344 if (len > IRDARAM_LEN)
345 len = IRDARAM_LEN;
346
347 dev_dbg(dev, "frame recv length = %d\n", len);
348
349 for (i = 0; i < len; i++) {
350 j = i % 2;
351 if (!j)
352 data16 = sh_irda_read(self, IRDARAM + i);
353
354 async_unwrap_char(self->ndev, &self->ndev->stats,
355 &self->rx_buff, data[j]);
356 }
357 self->ndev->last_rx = jiffies;
358
359 sh_irda_rcv_ctrl(self, 1);
360
361 return 0;
362}
363
364static int sh_irda_sir_trov(struct sh_irda_self *self)
365{
366 struct device *dev = &self->ndev->dev;
367
368 dev_err(dev, "buffer ram over\n");
369 sh_irda_rcv_ctrl(self, 1);
370 return 0;
371}
372
373static int sh_irda_sir_tot(struct sh_irda_self *self)
374{
375 struct device *dev = &self->ndev->dev;
376
377 dev_err(dev, "time over\n");
378 sh_irda_set_baudrate(self, 9600);
379 sh_irda_rcv_ctrl(self, 1);
380 return 0;
381}
382
383static int sh_irda_sir_fer(struct sh_irda_self *self)
384{
385 struct device *dev = &self->ndev->dev;
386
387 dev_err(dev, "framing error\n");
388 sh_irda_rcv_ctrl(self, 1);
389 return 0;
390}
391
392static int sh_irda_sir_fte(struct sh_irda_self *self)
393{
394 struct device *dev = &self->ndev->dev;
395
396 dev_dbg(dev, "frame transmit end\n");
397 netif_wake_queue(self->ndev);
398
399 return 0;
400}
401
402static struct sh_irda_xir_func sh_irda_sir_func = {
403 .xir_fre = sh_irda_sir_fre,
404 .xir_trov = sh_irda_sir_trov,
405 .xir_9 = sh_irda_sir_tot,
406 .xir_8 = sh_irda_sir_fer,
407 .xir_fte = sh_irda_sir_fte,
408};
409
410static void sh_irda_set_mode(struct sh_irda_self *self, enum sh_irda_mode mode)
411{
412 struct device *dev = &self->ndev->dev;
413 struct sh_irda_xir_func *func;
414 const char *name;
415 u16 data;
416
417 switch (mode) {
418 case SH_IRDA_SIR:
419 name = "SIR";
420 data = TMD_SIR;
421 func = &sh_irda_sir_func;
422 break;
423 case SH_IRDA_MIR:
424 name = "MIR";
425 data = TMD_MIR;
426 func = &sh_irda_mfir_func;
427 break;
428 case SH_IRDA_FIR:
429 name = "FIR";
430 data = TMD_FIR;
431 func = &sh_irda_mfir_func;
432 break;
433 default:
434 name = "NONE";
435 data = 0;
436 func = &sh_irda_xir_func;
437 break;
438 }
439
440 self->mode = mode;
441 self->xir_func = func;
442 sh_irda_update_bits(self, IRTMR, TMD_MASK, data);
443
444 dev_dbg(dev, "switch to %s mode", name);
445}
446
447/************************************************************************
448
449
450 irq function
451
452
453************************************************************************/
454static void sh_irda_set_irq_mask(struct sh_irda_self *self)
455{
456 u16 tmr_hole;
457 u16 xir_reg;
458
459 /* set all mask */
460 sh_irda_update_bits(self, IRTMR, xIM_MASK, xIM_MASK);
461 sh_irda_update_bits(self, SIRIMR, xIR_MASK, xIR_MASK);
462 sh_irda_update_bits(self, MFIRIMR, xIR_MASK, xIR_MASK);
463
464 /* clear irq */
465 sh_irda_update_bits(self, SIRICR, xIR_MASK, xIR_MASK);
466 sh_irda_update_bits(self, MFIRICR, xIR_MASK, xIR_MASK);
467
468 switch (self->mode) {
469 case SH_IRDA_SIR:
470 tmr_hole = SIM;
471 xir_reg = SIRIMR;
472 break;
473 case SH_IRDA_MIR:
474 case SH_IRDA_FIR:
475 tmr_hole = MIM;
476 xir_reg = MFIRIMR;
477 break;
478 default:
479 tmr_hole = 0;
480 xir_reg = 0;
481 break;
482 }
483
484 /* open mask */
485 if (xir_reg) {
486 sh_irda_update_bits(self, IRTMR, tmr_hole, 0);
487 sh_irda_update_bits(self, xir_reg, xIR_MASK, 0);
488 }
489}
490
491static irqreturn_t sh_irda_irq(int irq, void *dev_id)
492{
493 struct sh_irda_self *self = dev_id;
494 struct sh_irda_xir_func *func = self->xir_func;
495 u16 isr = sh_irda_read(self, SIRISR);
496
497 /* clear irq */
498 sh_irda_write(self, SIRICR, isr);
499
500 if (isr & FRE)
501 func->xir_fre(self);
502 if (isr & TROV)
503 func->xir_trov(self);
504 if (isr & xIR_9)
505 func->xir_9(self);
506 if (isr & xIR_8)
507 func->xir_8(self);
508 if (isr & FTE)
509 func->xir_fte(self);
510
511 return IRQ_HANDLED;
512}
513
514/************************************************************************
515
516
517 CRC function
518
519
520************************************************************************/
521static void sh_irda_crc_reset(struct sh_irda_self *self)
522{
523 sh_irda_write(self, CRCCTR, CRC_RST);
524}
525
526static void sh_irda_crc_add(struct sh_irda_self *self, u16 data)
527{
528 sh_irda_write(self, CRCIR, data & CRC_IN_MASK);
529}
530
531static u16 sh_irda_crc_cnt(struct sh_irda_self *self)
532{
533 return CRC_CT_MASK & sh_irda_read(self, CRCCTR);
534}
535
536static u16 sh_irda_crc_out(struct sh_irda_self *self)
537{
538 return sh_irda_read(self, CRCOR);
539}
540
541static int sh_irda_crc_init(struct sh_irda_self *self)
542{
543 struct device *dev = &self->ndev->dev;
544 int ret = -EIO;
545 u16 val;
546
547 sh_irda_crc_reset(self);
548
549 sh_irda_crc_add(self, 0xCC);
550 sh_irda_crc_add(self, 0xF5);
551 sh_irda_crc_add(self, 0xF1);
552 sh_irda_crc_add(self, 0xA7);
553
554 val = sh_irda_crc_cnt(self);
555 if (4 != val) {
556 dev_err(dev, "CRC count error %x\n", val);
557 goto crc_init_out;
558 }
559
560 val = sh_irda_crc_out(self);
561 if (0x51DF != val) {
562 dev_err(dev, "CRC result error%x\n", val);
563 goto crc_init_out;
564 }
565
566 ret = 0;
567
568crc_init_out:
569
570 sh_irda_crc_reset(self);
571 return ret;
572}
573
574/************************************************************************
575
576
577 iobuf function
578
579
580************************************************************************/
581static void sh_irda_remove_iobuf(struct sh_irda_self *self)
582{
583 kfree(self->rx_buff.head);
584
585 self->tx_buff.head = NULL;
586 self->tx_buff.data = NULL;
587 self->rx_buff.head = NULL;
588 self->rx_buff.data = NULL;
589}
590
591static int sh_irda_init_iobuf(struct sh_irda_self *self, int rxsize, int txsize)
592{
593 if (self->rx_buff.head ||
594 self->tx_buff.head) {
595 dev_err(&self->ndev->dev, "iobuff has already existed.");
596 return -EINVAL;
597 }
598
599 /* rx_buff */
600 self->rx_buff.head = kmalloc(rxsize, GFP_KERNEL);
601 if (!self->rx_buff.head)
602 return -ENOMEM;
603
604 self->rx_buff.truesize = rxsize;
605 self->rx_buff.in_frame = FALSE;
606 self->rx_buff.state = OUTSIDE_FRAME;
607 self->rx_buff.data = self->rx_buff.head;
608
609 /* tx_buff */
610 self->tx_buff.head = self->membase + IRDARAM;
611 self->tx_buff.truesize = IRDARAM_LEN;
612
613 return 0;
614}
615
616/************************************************************************
617
618
619 net_device_ops function
620
621
622************************************************************************/
623static int sh_irda_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
624{
625 struct sh_irda_self *self = netdev_priv(ndev);
626 struct device *dev = &self->ndev->dev;
627 int speed = irda_get_next_speed(skb);
628 int ret;
629
630 dev_dbg(dev, "hard xmit\n");
631
632 netif_stop_queue(ndev);
633 sh_irda_rcv_ctrl(self, 0);
634
635 ret = sh_irda_set_baudrate(self, speed);
636 if (ret < 0)
637 goto sh_irda_hard_xmit_end;
638
639 self->tx_buff.len = 0;
640 if (skb->len) {
641 unsigned long flags;
642
643 spin_lock_irqsave(&self->lock, flags);
644 self->tx_buff.len = async_wrap_skb(skb,
645 self->tx_buff.head,
646 self->tx_buff.truesize);
647 spin_unlock_irqrestore(&self->lock, flags);
648
649 if (self->tx_buff.len > self->tx_buff.truesize)
650 self->tx_buff.len = self->tx_buff.truesize;
651
652 sh_irda_write(self, IRTFLR, self->tx_buff.len);
653 sh_irda_write(self, IRTCTR, ARMOD | TE);
654 } else
655 goto sh_irda_hard_xmit_end;
656
657 dev_kfree_skb(skb);
658
659 return 0;
660
661sh_irda_hard_xmit_end:
662 sh_irda_set_baudrate(self, 9600);
663 netif_wake_queue(self->ndev);
664 sh_irda_rcv_ctrl(self, 1);
665 dev_kfree_skb(skb);
666
667 return ret;
668
669}
670
671static int sh_irda_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd)
672{
673 /*
674 * FIXME
675 *
676 * This function is needed for irda framework.
677 * But nothing to do now
678 */
679 return 0;
680}
681
682static struct net_device_stats *sh_irda_stats(struct net_device *ndev)
683{
684 struct sh_irda_self *self = netdev_priv(ndev);
685
686 return &self->ndev->stats;
687}
688
689static int sh_irda_open(struct net_device *ndev)
690{
691 struct sh_irda_self *self = netdev_priv(ndev);
692 int err;
693
694 pm_runtime_get_sync(&self->pdev->dev);
695 err = sh_irda_crc_init(self);
696 if (err)
697 goto open_err;
698
699 sh_irda_set_mode(self, SH_IRDA_SIR);
700 sh_irda_set_timeout(self, 2);
701 sh_irda_set_baudrate(self, 9600);
702
703 self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME);
704 if (!self->irlap) {
705 err = -ENODEV;
706 goto open_err;
707 }
708
709 netif_start_queue(ndev);
710 sh_irda_rcv_ctrl(self, 1);
711 sh_irda_set_irq_mask(self);
712
713 dev_info(&ndev->dev, "opened\n");
714
715 return 0;
716
717open_err:
718 pm_runtime_put_sync(&self->pdev->dev);
719
720 return err;
721}
722
723static int sh_irda_stop(struct net_device *ndev)
724{
725 struct sh_irda_self *self = netdev_priv(ndev);
726
727 /* Stop IrLAP */
728 if (self->irlap) {
729 irlap_close(self->irlap);
730 self->irlap = NULL;
731 }
732
733 netif_stop_queue(ndev);
734 pm_runtime_put_sync(&self->pdev->dev);
735
736 dev_info(&ndev->dev, "stopped\n");
737
738 return 0;
739}
740
741static const struct net_device_ops sh_irda_ndo = {
742 .ndo_open = sh_irda_open,
743 .ndo_stop = sh_irda_stop,
744 .ndo_start_xmit = sh_irda_hard_xmit,
745 .ndo_do_ioctl = sh_irda_ioctl,
746 .ndo_get_stats = sh_irda_stats,
747};
748
749/************************************************************************
750
751
752 platform_driver function
753
754
755************************************************************************/
756static int sh_irda_probe(struct platform_device *pdev)
757{
758 struct net_device *ndev;
759 struct sh_irda_self *self;
760 struct resource *res;
761 int irq;
762 int err = -ENOMEM;
763
764 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
765 irq = platform_get_irq(pdev, 0);
766 if (!res || irq < 0) {
767 dev_err(&pdev->dev, "Not enough platform resources.\n");
768 goto exit;
769 }
770
771 ndev = alloc_irdadev(sizeof(*self));
772 if (!ndev)
773 goto exit;
774
775 self = netdev_priv(ndev);
776 self->membase = ioremap_nocache(res->start, resource_size(res));
777 if (!self->membase) {
778 err = -ENXIO;
779 dev_err(&pdev->dev, "Unable to ioremap.\n");
780 goto err_mem_1;
781 }
782
783 err = sh_irda_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME);
784 if (err)
785 goto err_mem_2;
786
787 self->pdev = pdev;
788 pm_runtime_enable(&pdev->dev);
789
790 irda_init_max_qos_capabilies(&self->qos);
791
792 ndev->netdev_ops = &sh_irda_ndo;
793 ndev->irq = irq;
794
795 self->ndev = ndev;
796 self->qos.baud_rate.bits &= IR_9600; /* FIXME */
797 self->qos.min_turn_time.bits = 1; /* 10 ms or more */
798 spin_lock_init(&self->lock);
799
800 irda_qos_bits_to_value(&self->qos);
801
802 err = register_netdev(ndev);
803 if (err)
804 goto err_mem_4;
805
806 platform_set_drvdata(pdev, ndev);
807 err = devm_request_irq(&pdev->dev, irq, sh_irda_irq, 0, "sh_irda", self);
808 if (err) {
809 dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n");
810 goto err_mem_4;
811 }
812
813 dev_info(&pdev->dev, "SuperH IrDA probed\n");
814
815 goto exit;
816
817err_mem_4:
818 pm_runtime_disable(&pdev->dev);
819 sh_irda_remove_iobuf(self);
820err_mem_2:
821 iounmap(self->membase);
822err_mem_1:
823 free_netdev(ndev);
824exit:
825 return err;
826}
827
828static int sh_irda_remove(struct platform_device *pdev)
829{
830 struct net_device *ndev = platform_get_drvdata(pdev);
831 struct sh_irda_self *self = netdev_priv(ndev);
832
833 if (!self)
834 return 0;
835
836 unregister_netdev(ndev);
837 pm_runtime_disable(&pdev->dev);
838 sh_irda_remove_iobuf(self);
839 iounmap(self->membase);
840 free_netdev(ndev);
841
842 return 0;
843}
844
845static int sh_irda_runtime_nop(struct device *dev)
846{
847 /* Runtime PM callback shared between ->runtime_suspend()
848 * and ->runtime_resume(). Simply returns success.
849 *
850 * This driver re-initializes all registers after
851 * pm_runtime_get_sync() anyway so there is no need
852 * to save and restore registers here.
853 */
854 return 0;
855}
856
857static const struct dev_pm_ops sh_irda_pm_ops = {
858 .runtime_suspend = sh_irda_runtime_nop,
859 .runtime_resume = sh_irda_runtime_nop,
860};
861
862static struct platform_driver sh_irda_driver = {
863 .probe = sh_irda_probe,
864 .remove = sh_irda_remove,
865 .driver = {
866 .name = DRIVER_NAME,
867 .pm = &sh_irda_pm_ops,
868 },
869};
870
871module_platform_driver(sh_irda_driver);
872
873MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
874MODULE_DESCRIPTION("SuperH IrDA driver");
875MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 1e901c7cfaac..b3ffaee30858 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -277,12 +277,16 @@ static int at803x_probe(struct phy_device *phydev)
277 if (!priv) 277 if (!priv)
278 return -ENOMEM; 278 return -ENOMEM;
279 279
280 gpiod_reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); 280 if (phydev->drv->phy_id != ATH8030_PHY_ID)
281 goto does_not_require_reset_workaround;
282
283 gpiod_reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
281 if (IS_ERR(gpiod_reset)) 284 if (IS_ERR(gpiod_reset))
282 return PTR_ERR(gpiod_reset); 285 return PTR_ERR(gpiod_reset);
283 286
284 priv->gpiod_reset = gpiod_reset; 287 priv->gpiod_reset = gpiod_reset;
285 288
289does_not_require_reset_workaround:
286 phydev->priv = priv; 290 phydev->priv = priv;
287 291
288 return 0; 292 return 0;
@@ -362,10 +366,10 @@ static void at803x_link_change_notify(struct phy_device *phydev)
362 366
363 at803x_context_save(phydev, &context); 367 at803x_context_save(phydev, &context);
364 368
365 gpiod_set_value(priv->gpiod_reset, 0);
366 msleep(1);
367 gpiod_set_value(priv->gpiod_reset, 1); 369 gpiod_set_value(priv->gpiod_reset, 1);
368 msleep(1); 370 msleep(1);
371 gpiod_set_value(priv->gpiod_reset, 0);
372 msleep(1);
369 373
370 at803x_context_restore(phydev, &context); 374 at803x_context_restore(phydev, &context);
371 375
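
The at803x hunks above do two related things: the reset workaround is now applied only to the ATH8030 PHY, and the reset GPIO is requested deasserted (GPIOD_OUT_LOW) and then pulsed with logical 1/0, i.e. assert-then-release in gpiolib's logical-level convention, so the active-low polarity declared in firmware is applied by gpiolib rather than hard-coded. A kernel-style sketch of the intended pulse, shown for illustration only (the phy_hw_reset_pulse() helper name is made up):

#include <linux/gpio/consumer.h>
#include <linux/delay.h>

static void phy_hw_reset_pulse(struct gpio_desc *reset_gpiod)
{
	gpiod_set_value(reset_gpiod, 1);	/* assert reset (logical level) */
	msleep(1);
	gpiod_set_value(reset_gpiod, 0);	/* release reset */
	msleep(1);				/* give the PHY time to come back */
}

With the logical-level convention the same code works whether the board wires the reset pin active-low or active-high; only the GPIO flags in the device description change.
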
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index b881a7b1e4f6..9636da0b6efc 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -339,6 +339,8 @@ static struct phy_driver bcm7xxx_driver[] = {
339 BCM7XXX_28NM_GPHY(PHY_ID_BCM7439, "Broadcom BCM7439"), 339 BCM7XXX_28NM_GPHY(PHY_ID_BCM7439, "Broadcom BCM7439"),
340 BCM7XXX_28NM_GPHY(PHY_ID_BCM7439_2, "Broadcom BCM7439 (2)"), 340 BCM7XXX_28NM_GPHY(PHY_ID_BCM7439_2, "Broadcom BCM7439 (2)"),
341 BCM7XXX_28NM_GPHY(PHY_ID_BCM7445, "Broadcom BCM7445"), 341 BCM7XXX_28NM_GPHY(PHY_ID_BCM7445, "Broadcom BCM7445"),
342 BCM7XXX_40NM_EPHY(PHY_ID_BCM7346, "Broadcom BCM7346"),
343 BCM7XXX_40NM_EPHY(PHY_ID_BCM7362, "Broadcom BCM7362"),
342 BCM7XXX_40NM_EPHY(PHY_ID_BCM7425, "Broadcom BCM7425"), 344 BCM7XXX_40NM_EPHY(PHY_ID_BCM7425, "Broadcom BCM7425"),
343 BCM7XXX_40NM_EPHY(PHY_ID_BCM7429, "Broadcom BCM7429"), 345 BCM7XXX_40NM_EPHY(PHY_ID_BCM7429, "Broadcom BCM7429"),
344 BCM7XXX_40NM_EPHY(PHY_ID_BCM7435, "Broadcom BCM7435"), 346 BCM7XXX_40NM_EPHY(PHY_ID_BCM7435, "Broadcom BCM7435"),
@@ -348,6 +350,8 @@ static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
348 { PHY_ID_BCM7250, 0xfffffff0, }, 350 { PHY_ID_BCM7250, 0xfffffff0, },
349 { PHY_ID_BCM7364, 0xfffffff0, }, 351 { PHY_ID_BCM7364, 0xfffffff0, },
350 { PHY_ID_BCM7366, 0xfffffff0, }, 352 { PHY_ID_BCM7366, 0xfffffff0, },
353 { PHY_ID_BCM7346, 0xfffffff0, },
354 { PHY_ID_BCM7362, 0xfffffff0, },
351 { PHY_ID_BCM7425, 0xfffffff0, }, 355 { PHY_ID_BCM7425, 0xfffffff0, },
352 { PHY_ID_BCM7429, 0xfffffff0, }, 356 { PHY_ID_BCM7429, 0xfffffff0, },
353 { PHY_ID_BCM7439, 0xfffffff0, }, 357 { PHY_ID_BCM7439, 0xfffffff0, },
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c
index f70522c35163..135296508a7e 100644
--- a/drivers/net/phy/mdio-sun4i.c
+++ b/drivers/net/phy/mdio-sun4i.c
@@ -122,6 +122,7 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
122 return -EPROBE_DEFER; 122 return -EPROBE_DEFER;
123 123
124 dev_info(&pdev->dev, "no regulator found\n"); 124 dev_info(&pdev->dev, "no regulator found\n");
125 data->regulator = NULL;
125 } else { 126 } else {
126 ret = regulator_enable(data->regulator); 127 ret = regulator_enable(data->regulator);
127 if (ret) 128 if (ret)
@@ -137,7 +138,8 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
137 return 0; 138 return 0;
138 139
139err_out_disable_regulator: 140err_out_disable_regulator:
140 regulator_disable(data->regulator); 141 if (data->regulator)
142 regulator_disable(data->regulator);
141err_out_free_mdiobus: 143err_out_free_mdiobus:
142 mdiobus_free(bus); 144 mdiobus_free(bus);
143 return ret; 145 return ret;
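
The mdio-sun4i fix above makes the PHY regulator genuinely optional: when no supply is described, data->regulator is set to NULL and the error path skips regulator_disable(), which would otherwise be handed the stale ERR_PTR-encoded pointer. A kernel-style sketch of that pattern, for illustration only (the bus_power_on()/bus_power_off() helper names are invented):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int bus_power_on(struct device *dev, struct regulator **out)
{
	struct regulator *reg = devm_regulator_get(dev, "phy");

	if (IS_ERR(reg)) {
		if (PTR_ERR(reg) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		dev_info(dev, "no regulator found\n");
		*out = NULL;			/* remember that it is absent */
		return 0;
	}

	*out = reg;
	return regulator_enable(reg);
}

static void bus_power_off(struct regulator *reg)
{
	if (reg)				/* only disable what was enabled */
		regulator_disable(reg);
}
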
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 4fd861063ed4..f572b31a2b20 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -2307,7 +2307,7 @@ int ppp_register_net_channel(struct net *net, struct ppp_channel *chan)
2307 2307
2308 pch->ppp = NULL; 2308 pch->ppp = NULL;
2309 pch->chan = chan; 2309 pch->chan = chan;
2310 pch->chan_net = net; 2310 pch->chan_net = get_net(net);
2311 chan->ppp = pch; 2311 chan->ppp = pch;
2312 init_ppp_file(&pch->file, CHANNEL); 2312 init_ppp_file(&pch->file, CHANNEL);
2313 pch->file.hdrlen = chan->hdrlen; 2313 pch->file.hdrlen = chan->hdrlen;
@@ -2404,6 +2404,8 @@ ppp_unregister_channel(struct ppp_channel *chan)
2404 spin_lock_bh(&pn->all_channels_lock); 2404 spin_lock_bh(&pn->all_channels_lock);
2405 list_del(&pch->list); 2405 list_del(&pch->list);
2406 spin_unlock_bh(&pn->all_channels_lock); 2406 spin_unlock_bh(&pn->all_channels_lock);
2407 put_net(pch->chan_net);
2408 pch->chan_net = NULL;
2407 2409
2408 pch->file.dead = 1; 2410 pch->file.dead = 1;
2409 wake_up_interruptible(&pch->file.rwait); 2411 wake_up_interruptible(&pch->file.rwait);
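
The ppp_generic change above pins the channel's network namespace: get_net() takes a reference when pch->chan_net is stored, and put_net() drops it when the channel is unregistered, so the struct net cannot go away while a registered channel still points at it. A kernel-style sketch of that pairing, for illustration only (chan_bind_net()/chan_unbind_net() are invented names):

#include <net/net_namespace.h>

struct chan_priv {
	struct net *chan_net;
};

static void chan_bind_net(struct chan_priv *p, struct net *net)
{
	p->chan_net = get_net(net);	/* hold a reference for the lifetime of the binding */
}

static void chan_unbind_net(struct chan_priv *p)
{
	put_net(p->chan_net);		/* drop the reference taken at bind time */
	p->chan_net = NULL;		/* make stale use easier to catch */
}
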
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 01f08a7751f7..9cfe6aeac84e 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -24,6 +24,7 @@
24#include <linux/skbuff.h> 24#include <linux/skbuff.h>
25#include <linux/crc32.h> 25#include <linux/crc32.h>
26#include <linux/ethtool.h> 26#include <linux/ethtool.h>
27#include <linux/reboot.h>
27 28
28#define DRV_NAME "rionet" 29#define DRV_NAME "rionet"
29#define DRV_VERSION "0.3" 30#define DRV_VERSION "0.3"
@@ -48,6 +49,8 @@ MODULE_LICENSE("GPL");
48#define RIONET_TX_RING_SIZE CONFIG_RIONET_TX_SIZE 49#define RIONET_TX_RING_SIZE CONFIG_RIONET_TX_SIZE
49#define RIONET_RX_RING_SIZE CONFIG_RIONET_RX_SIZE 50#define RIONET_RX_RING_SIZE CONFIG_RIONET_RX_SIZE
50#define RIONET_MAX_NETS 8 51#define RIONET_MAX_NETS 8
52#define RIONET_MSG_SIZE RIO_MAX_MSG_SIZE
53#define RIONET_MAX_MTU (RIONET_MSG_SIZE - ETH_HLEN)
51 54
52struct rionet_private { 55struct rionet_private {
53 struct rio_mport *mport; 56 struct rio_mport *mport;
@@ -60,6 +63,7 @@ struct rionet_private {
60 spinlock_t lock; 63 spinlock_t lock;
61 spinlock_t tx_lock; 64 spinlock_t tx_lock;
62 u32 msg_enable; 65 u32 msg_enable;
66 bool open;
63}; 67};
64 68
65struct rionet_peer { 69struct rionet_peer {
@@ -71,6 +75,7 @@ struct rionet_peer {
71struct rionet_net { 75struct rionet_net {
72 struct net_device *ndev; 76 struct net_device *ndev;
73 struct list_head peers; 77 struct list_head peers;
78 spinlock_t lock; /* net info access lock */
74 struct rio_dev **active; 79 struct rio_dev **active;
75 int nact; /* number of active peers */ 80 int nact; /* number of active peers */
76}; 81};
@@ -232,26 +237,32 @@ static void rionet_dbell_event(struct rio_mport *mport, void *dev_id, u16 sid, u
232 struct net_device *ndev = dev_id; 237 struct net_device *ndev = dev_id;
233 struct rionet_private *rnet = netdev_priv(ndev); 238 struct rionet_private *rnet = netdev_priv(ndev);
234 struct rionet_peer *peer; 239 struct rionet_peer *peer;
240 unsigned char netid = rnet->mport->id;
235 241
236 if (netif_msg_intr(rnet)) 242 if (netif_msg_intr(rnet))
237 printk(KERN_INFO "%s: doorbell sid %4.4x tid %4.4x info %4.4x", 243 printk(KERN_INFO "%s: doorbell sid %4.4x tid %4.4x info %4.4x",
238 DRV_NAME, sid, tid, info); 244 DRV_NAME, sid, tid, info);
239 if (info == RIONET_DOORBELL_JOIN) { 245 if (info == RIONET_DOORBELL_JOIN) {
240 if (!nets[rnet->mport->id].active[sid]) { 246 if (!nets[netid].active[sid]) {
241 list_for_each_entry(peer, 247 spin_lock(&nets[netid].lock);
242 &nets[rnet->mport->id].peers, node) { 248 list_for_each_entry(peer, &nets[netid].peers, node) {
243 if (peer->rdev->destid == sid) { 249 if (peer->rdev->destid == sid) {
244 nets[rnet->mport->id].active[sid] = 250 nets[netid].active[sid] = peer->rdev;
245 peer->rdev; 251 nets[netid].nact++;
246 nets[rnet->mport->id].nact++;
247 } 252 }
248 } 253 }
254 spin_unlock(&nets[netid].lock);
255
249 rio_mport_send_doorbell(mport, sid, 256 rio_mport_send_doorbell(mport, sid,
250 RIONET_DOORBELL_JOIN); 257 RIONET_DOORBELL_JOIN);
251 } 258 }
252 } else if (info == RIONET_DOORBELL_LEAVE) { 259 } else if (info == RIONET_DOORBELL_LEAVE) {
253 nets[rnet->mport->id].active[sid] = NULL; 260 spin_lock(&nets[netid].lock);
254 nets[rnet->mport->id].nact--; 261 if (nets[netid].active[sid]) {
262 nets[netid].active[sid] = NULL;
263 nets[netid].nact--;
264 }
265 spin_unlock(&nets[netid].lock);
255 } else { 266 } else {
256 if (netif_msg_intr(rnet)) 267 if (netif_msg_intr(rnet))
257 printk(KERN_WARNING "%s: unhandled doorbell\n", 268 printk(KERN_WARNING "%s: unhandled doorbell\n",
@@ -280,7 +291,7 @@ static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbo
280 struct net_device *ndev = dev_id; 291 struct net_device *ndev = dev_id;
281 struct rionet_private *rnet = netdev_priv(ndev); 292 struct rionet_private *rnet = netdev_priv(ndev);
282 293
283 spin_lock(&rnet->lock); 294 spin_lock(&rnet->tx_lock);
284 295
285 if (netif_msg_intr(rnet)) 296 if (netif_msg_intr(rnet))
286 printk(KERN_INFO 297 printk(KERN_INFO
@@ -299,14 +310,16 @@ static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbo
299 if (rnet->tx_cnt < RIONET_TX_RING_SIZE) 310 if (rnet->tx_cnt < RIONET_TX_RING_SIZE)
300 netif_wake_queue(ndev); 311 netif_wake_queue(ndev);
301 312
302 spin_unlock(&rnet->lock); 313 spin_unlock(&rnet->tx_lock);
303} 314}
304 315
305static int rionet_open(struct net_device *ndev) 316static int rionet_open(struct net_device *ndev)
306{ 317{
307 int i, rc = 0; 318 int i, rc = 0;
308 struct rionet_peer *peer, *tmp; 319 struct rionet_peer *peer;
309 struct rionet_private *rnet = netdev_priv(ndev); 320 struct rionet_private *rnet = netdev_priv(ndev);
321 unsigned char netid = rnet->mport->id;
322 unsigned long flags;
310 323
311 if (netif_msg_ifup(rnet)) 324 if (netif_msg_ifup(rnet))
312 printk(KERN_INFO "%s: open\n", DRV_NAME); 325 printk(KERN_INFO "%s: open\n", DRV_NAME);
@@ -345,20 +358,13 @@ static int rionet_open(struct net_device *ndev)
345 netif_carrier_on(ndev); 358 netif_carrier_on(ndev);
346 netif_start_queue(ndev); 359 netif_start_queue(ndev);
347 360
348 list_for_each_entry_safe(peer, tmp, 361 spin_lock_irqsave(&nets[netid].lock, flags);
349 &nets[rnet->mport->id].peers, node) { 362 list_for_each_entry(peer, &nets[netid].peers, node) {
350 if (!(peer->res = rio_request_outb_dbell(peer->rdev,
351 RIONET_DOORBELL_JOIN,
352 RIONET_DOORBELL_LEAVE)))
353 {
354 printk(KERN_ERR "%s: error requesting doorbells\n",
355 DRV_NAME);
356 continue;
357 }
358
359 /* Send a join message */ 363 /* Send a join message */
360 rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN); 364 rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
361 } 365 }
366 spin_unlock_irqrestore(&nets[netid].lock, flags);
367 rnet->open = true;
362 368
363 out: 369 out:
364 return rc; 370 return rc;
@@ -367,7 +373,9 @@ static int rionet_open(struct net_device *ndev)
367static int rionet_close(struct net_device *ndev) 373static int rionet_close(struct net_device *ndev)
368{ 374{
369 struct rionet_private *rnet = netdev_priv(ndev); 375 struct rionet_private *rnet = netdev_priv(ndev);
370 struct rionet_peer *peer, *tmp; 376 struct rionet_peer *peer;
377 unsigned char netid = rnet->mport->id;
378 unsigned long flags;
371 int i; 379 int i;
372 380
373 if (netif_msg_ifup(rnet)) 381 if (netif_msg_ifup(rnet))
@@ -375,18 +383,21 @@ static int rionet_close(struct net_device *ndev)
375 383
376 netif_stop_queue(ndev); 384 netif_stop_queue(ndev);
377 netif_carrier_off(ndev); 385 netif_carrier_off(ndev);
386 rnet->open = false;
378 387
379 for (i = 0; i < RIONET_RX_RING_SIZE; i++) 388 for (i = 0; i < RIONET_RX_RING_SIZE; i++)
380 kfree_skb(rnet->rx_skb[i]); 389 kfree_skb(rnet->rx_skb[i]);
381 390
382 list_for_each_entry_safe(peer, tmp, 391 spin_lock_irqsave(&nets[netid].lock, flags);
383 &nets[rnet->mport->id].peers, node) { 392 list_for_each_entry(peer, &nets[netid].peers, node) {
384 if (nets[rnet->mport->id].active[peer->rdev->destid]) { 393 if (nets[netid].active[peer->rdev->destid]) {
385 rio_send_doorbell(peer->rdev, RIONET_DOORBELL_LEAVE); 394 rio_send_doorbell(peer->rdev, RIONET_DOORBELL_LEAVE);
386 nets[rnet->mport->id].active[peer->rdev->destid] = NULL; 395 nets[netid].active[peer->rdev->destid] = NULL;
387 } 396 }
388 rio_release_outb_dbell(peer->rdev, peer->res); 397 if (peer->res)
398 rio_release_outb_dbell(peer->rdev, peer->res);
389 } 399 }
400 spin_unlock_irqrestore(&nets[netid].lock, flags);
390 401
391 rio_release_inb_dbell(rnet->mport, RIONET_DOORBELL_JOIN, 402 rio_release_inb_dbell(rnet->mport, RIONET_DOORBELL_JOIN,
392 RIONET_DOORBELL_LEAVE); 403 RIONET_DOORBELL_LEAVE);
@@ -400,22 +411,38 @@ static void rionet_remove_dev(struct device *dev, struct subsys_interface *sif)
400{ 411{
401 struct rio_dev *rdev = to_rio_dev(dev); 412 struct rio_dev *rdev = to_rio_dev(dev);
402 unsigned char netid = rdev->net->hport->id; 413 unsigned char netid = rdev->net->hport->id;
403 struct rionet_peer *peer, *tmp; 414 struct rionet_peer *peer;
415 int state, found = 0;
416 unsigned long flags;
404 417
405 if (dev_rionet_capable(rdev)) { 418 if (!dev_rionet_capable(rdev))
406 list_for_each_entry_safe(peer, tmp, &nets[netid].peers, node) { 419 return;
407 if (peer->rdev == rdev) { 420
408 if (nets[netid].active[rdev->destid]) { 421 spin_lock_irqsave(&nets[netid].lock, flags);
409 nets[netid].active[rdev->destid] = NULL; 422 list_for_each_entry(peer, &nets[netid].peers, node) {
410 nets[netid].nact--; 423 if (peer->rdev == rdev) {
424 list_del(&peer->node);
425 if (nets[netid].active[rdev->destid]) {
426 state = atomic_read(&rdev->state);
427 if (state != RIO_DEVICE_GONE &&
428 state != RIO_DEVICE_INITIALIZING) {
429 rio_send_doorbell(rdev,
430 RIONET_DOORBELL_LEAVE);
411 } 431 }
412 432 nets[netid].active[rdev->destid] = NULL;
413 list_del(&peer->node); 433 nets[netid].nact--;
414 kfree(peer);
415 break;
416 } 434 }
435 found = 1;
436 break;
417 } 437 }
418 } 438 }
439 spin_unlock_irqrestore(&nets[netid].lock, flags);
440
441 if (found) {
442 if (peer->res)
443 rio_release_outb_dbell(rdev, peer->res);
444 kfree(peer);
445 }
419} 446}
420 447
421static void rionet_get_drvinfo(struct net_device *ndev, 448static void rionet_get_drvinfo(struct net_device *ndev,
@@ -443,6 +470,17 @@ static void rionet_set_msglevel(struct net_device *ndev, u32 value)
443 rnet->msg_enable = value; 470 rnet->msg_enable = value;
444} 471}
445 472
473static int rionet_change_mtu(struct net_device *ndev, int new_mtu)
474{
475 if ((new_mtu < 68) || (new_mtu > RIONET_MAX_MTU)) {
476 printk(KERN_ERR "%s: Invalid MTU size %d\n",
477 ndev->name, new_mtu);
478 return -EINVAL;
479 }
480 ndev->mtu = new_mtu;
481 return 0;
482}
483
446static const struct ethtool_ops rionet_ethtool_ops = { 484static const struct ethtool_ops rionet_ethtool_ops = {
447 .get_drvinfo = rionet_get_drvinfo, 485 .get_drvinfo = rionet_get_drvinfo,
448 .get_msglevel = rionet_get_msglevel, 486 .get_msglevel = rionet_get_msglevel,
@@ -454,7 +492,7 @@ static const struct net_device_ops rionet_netdev_ops = {
454 .ndo_open = rionet_open, 492 .ndo_open = rionet_open,
455 .ndo_stop = rionet_close, 493 .ndo_stop = rionet_close,
456 .ndo_start_xmit = rionet_start_xmit, 494 .ndo_start_xmit = rionet_start_xmit,
457 .ndo_change_mtu = eth_change_mtu, 495 .ndo_change_mtu = rionet_change_mtu,
458 .ndo_validate_addr = eth_validate_addr, 496 .ndo_validate_addr = eth_validate_addr,
459 .ndo_set_mac_address = eth_mac_addr, 497 .ndo_set_mac_address = eth_mac_addr,
460}; 498};
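
The hunk above replaces eth_change_mtu() with rionet_change_mtu(), bounding the MTU by RIONET_MAX_MTU = RIONET_MSG_SIZE - ETH_HLEN: a single RapidIO mailbox message has to hold both the Ethernet header and the payload. A small standalone sketch of the arithmetic; the constant values are restated here purely for illustration (RIO_MAX_MSG_SIZE is 0x1000 in the kernel headers of this era):

#include <stdio.h>

#define RIO_MAX_MSG_SIZE	0x1000			/* one mailbox message: 4 KiB */
#define ETH_HLEN		14			/* dst MAC + src MAC + ethertype */
#define RIONET_MSG_SIZE		RIO_MAX_MSG_SIZE
#define RIONET_MAX_MTU		(RIONET_MSG_SIZE - ETH_HLEN)

static int rionet_mtu_valid(int new_mtu)
{
	/* same bounds as rionet_change_mtu() above */
	return new_mtu >= 68 && new_mtu <= RIONET_MAX_MTU;
}

int main(void)
{
	const int samples[] = { 67, 68, 1500, RIONET_MAX_MTU, RIONET_MAX_MTU + 1 };
	unsigned int i;

	printf("RIONET_MAX_MTU = %d\n", RIONET_MAX_MTU);	/* 4082 */
	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("mtu %4d -> %s\n", samples[i],
		       rionet_mtu_valid(samples[i]) ? "accepted" : "rejected");
	return 0;
}
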
@@ -478,6 +516,7 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
478 /* Set up private area */ 516 /* Set up private area */
479 rnet = netdev_priv(ndev); 517 rnet = netdev_priv(ndev);
480 rnet->mport = mport; 518 rnet->mport = mport;
519 rnet->open = false;
481 520
482 /* Set the default MAC address */ 521 /* Set the default MAC address */
483 device_id = rio_local_get_device_id(mport); 522 device_id = rio_local_get_device_id(mport);
@@ -489,7 +528,7 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
489 ndev->dev_addr[5] = device_id & 0xff; 528 ndev->dev_addr[5] = device_id & 0xff;
490 529
491 ndev->netdev_ops = &rionet_netdev_ops; 530 ndev->netdev_ops = &rionet_netdev_ops;
492 ndev->mtu = RIO_MAX_MSG_SIZE - 14; 531 ndev->mtu = RIONET_MAX_MTU;
493 ndev->features = NETIF_F_LLTX; 532 ndev->features = NETIF_F_LLTX;
494 SET_NETDEV_DEV(ndev, &mport->dev); 533 SET_NETDEV_DEV(ndev, &mport->dev);
495 ndev->ethtool_ops = &rionet_ethtool_ops; 534 ndev->ethtool_ops = &rionet_ethtool_ops;
@@ -500,8 +539,11 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
500 rnet->msg_enable = RIONET_DEFAULT_MSGLEVEL; 539 rnet->msg_enable = RIONET_DEFAULT_MSGLEVEL;
501 540
502 rc = register_netdev(ndev); 541 rc = register_netdev(ndev);
503 if (rc != 0) 542 if (rc != 0) {
543 free_pages((unsigned long)nets[mport->id].active,
544 get_order(rionet_active_bytes));
504 goto out; 545 goto out;
546 }
505 547
506 printk(KERN_INFO "%s: %s %s Version %s, MAC %pM, %s\n", 548 printk(KERN_INFO "%s: %s %s Version %s, MAC %pM, %s\n",
507 ndev->name, 549 ndev->name,
@@ -515,8 +557,6 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
515 return rc; 557 return rc;
516} 558}
517 559
518static unsigned long net_table[RIONET_MAX_NETS/sizeof(unsigned long) + 1];
519
520static int rionet_add_dev(struct device *dev, struct subsys_interface *sif) 560static int rionet_add_dev(struct device *dev, struct subsys_interface *sif)
521{ 561{
522 int rc = -ENODEV; 562 int rc = -ENODEV;
@@ -525,19 +565,16 @@ static int rionet_add_dev(struct device *dev, struct subsys_interface *sif)
525 struct net_device *ndev = NULL; 565 struct net_device *ndev = NULL;
526 struct rio_dev *rdev = to_rio_dev(dev); 566 struct rio_dev *rdev = to_rio_dev(dev);
527 unsigned char netid = rdev->net->hport->id; 567 unsigned char netid = rdev->net->hport->id;
528 int oldnet;
529 568
530 if (netid >= RIONET_MAX_NETS) 569 if (netid >= RIONET_MAX_NETS)
531 return rc; 570 return rc;
532 571
533 oldnet = test_and_set_bit(netid, net_table);
534
535 /* 572 /*
536 * If first time through this net, make sure local device is rionet 573 * If first time through this net, make sure local device is rionet
537 * capable and setup netdev (this step will be skipped in later probes 574 * capable and setup netdev (this step will be skipped in later probes
538 * on the same net). 575 * on the same net).
539 */ 576 */
540 if (!oldnet) { 577 if (!nets[netid].ndev) {
541 rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR, 578 rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR,
542 &lsrc_ops); 579 &lsrc_ops);
543 rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR, 580 rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR,
@@ -555,30 +592,56 @@ static int rionet_add_dev(struct device *dev, struct subsys_interface *sif)
555 rc = -ENOMEM; 592 rc = -ENOMEM;
556 goto out; 593 goto out;
557 } 594 }
558 nets[netid].ndev = ndev; 595
559 rc = rionet_setup_netdev(rdev->net->hport, ndev); 596 rc = rionet_setup_netdev(rdev->net->hport, ndev);
560 if (rc) { 597 if (rc) {
561 printk(KERN_ERR "%s: failed to setup netdev (rc=%d)\n", 598 printk(KERN_ERR "%s: failed to setup netdev (rc=%d)\n",
562 DRV_NAME, rc); 599 DRV_NAME, rc);
600 free_netdev(ndev);
563 goto out; 601 goto out;
564 } 602 }
565 603
566 INIT_LIST_HEAD(&nets[netid].peers); 604 INIT_LIST_HEAD(&nets[netid].peers);
605 spin_lock_init(&nets[netid].lock);
567 nets[netid].nact = 0; 606 nets[netid].nact = 0;
568 } else if (nets[netid].ndev == NULL) 607 nets[netid].ndev = ndev;
569 goto out; 608 }
570 609
571 /* 610 /*
572 * If the remote device has mailbox/doorbell capabilities, 611 * If the remote device has mailbox/doorbell capabilities,
573 * add it to the peer list. 612 * add it to the peer list.
574 */ 613 */
575 if (dev_rionet_capable(rdev)) { 614 if (dev_rionet_capable(rdev)) {
576 if (!(peer = kmalloc(sizeof(struct rionet_peer), GFP_KERNEL))) { 615 struct rionet_private *rnet;
616 unsigned long flags;
617
618 rnet = netdev_priv(nets[netid].ndev);
619
620 peer = kzalloc(sizeof(*peer), GFP_KERNEL);
621 if (!peer) {
577 rc = -ENOMEM; 622 rc = -ENOMEM;
578 goto out; 623 goto out;
579 } 624 }
580 peer->rdev = rdev; 625 peer->rdev = rdev;
626 peer->res = rio_request_outb_dbell(peer->rdev,
627 RIONET_DOORBELL_JOIN,
628 RIONET_DOORBELL_LEAVE);
629 if (!peer->res) {
630 pr_err("%s: error requesting doorbells\n", DRV_NAME);
631 kfree(peer);
632 rc = -ENOMEM;
633 goto out;
634 }
635
636 spin_lock_irqsave(&nets[netid].lock, flags);
581 list_add_tail(&peer->node, &nets[netid].peers); 637 list_add_tail(&peer->node, &nets[netid].peers);
638 spin_unlock_irqrestore(&nets[netid].lock, flags);
639 pr_debug("%s: %s add peer %s\n",
640 DRV_NAME, __func__, rio_name(rdev));
641
642 /* If netdev is already opened, send join request to new peer */
643 if (rnet->open)
644 rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
582 } 645 }
583 646
584 return 0; 647 return 0;
@@ -586,6 +649,61 @@ out:
586 return rc; 649 return rc;
587} 650}
588 651
652static int rionet_shutdown(struct notifier_block *nb, unsigned long code,
653 void *unused)
654{
655 struct rionet_peer *peer;
656 unsigned long flags;
657 int i;
658
659 pr_debug("%s: %s\n", DRV_NAME, __func__);
660
661 for (i = 0; i < RIONET_MAX_NETS; i++) {
662 if (!nets[i].ndev)
663 continue;
664
665 spin_lock_irqsave(&nets[i].lock, flags);
666 list_for_each_entry(peer, &nets[i].peers, node) {
667 if (nets[i].active[peer->rdev->destid]) {
668 rio_send_doorbell(peer->rdev,
669 RIONET_DOORBELL_LEAVE);
670 nets[i].active[peer->rdev->destid] = NULL;
671 }
672 }
673 spin_unlock_irqrestore(&nets[i].lock, flags);
674 }
675
676 return NOTIFY_DONE;
677}
678
679static void rionet_remove_mport(struct device *dev,
680 struct class_interface *class_intf)
681{
682 struct rio_mport *mport = to_rio_mport(dev);
683 struct net_device *ndev;
684 int id = mport->id;
685
686 pr_debug("%s %s\n", __func__, mport->name);
687
688 WARN(nets[id].nact, "%s called when connected to %d peers\n",
689 __func__, nets[id].nact);
690 WARN(!nets[id].ndev, "%s called for mport without NDEV\n",
691 __func__);
692
693 if (nets[id].ndev) {
694 ndev = nets[id].ndev;
695 netif_stop_queue(ndev);
696 unregister_netdev(ndev);
697
698 free_pages((unsigned long)nets[id].active,
699 get_order(sizeof(void *) *
700 RIO_MAX_ROUTE_ENTRIES(mport->sys_size)));
701 nets[id].active = NULL;
702 free_netdev(ndev);
703 nets[id].ndev = NULL;
704 }
705}
706
589#ifdef MODULE 707#ifdef MODULE
590static struct rio_device_id rionet_id_table[] = { 708static struct rio_device_id rionet_id_table[] = {
591 {RIO_DEVICE(RIO_ANY_ID, RIO_ANY_ID)}, 709 {RIO_DEVICE(RIO_ANY_ID, RIO_ANY_ID)},
@@ -602,40 +720,43 @@ static struct subsys_interface rionet_interface = {
602 .remove_dev = rionet_remove_dev, 720 .remove_dev = rionet_remove_dev,
603}; 721};
604 722
723static struct notifier_block rionet_notifier = {
724 .notifier_call = rionet_shutdown,
725};
726
727/* the rio_mport_interface is used to handle local mport devices */
728static struct class_interface rio_mport_interface __refdata = {
729 .class = &rio_mport_class,
730 .add_dev = NULL,
731 .remove_dev = rionet_remove_mport,
732};
733
605static int __init rionet_init(void) 734static int __init rionet_init(void)
606{ 735{
736 int ret;
737
738 ret = register_reboot_notifier(&rionet_notifier);
739 if (ret) {
740 pr_err("%s: failed to register reboot notifier (err=%d)\n",
741 DRV_NAME, ret);
742 return ret;
743 }
744
745 ret = class_interface_register(&rio_mport_interface);
746 if (ret) {
747 pr_err("%s: class_interface_register error: %d\n",
748 DRV_NAME, ret);
749 return ret;
750 }
751
607 return subsys_interface_register(&rionet_interface); 752 return subsys_interface_register(&rionet_interface);
608} 753}
609 754
610static void __exit rionet_exit(void) 755static void __exit rionet_exit(void)
611{ 756{
612 struct rionet_private *rnet; 757 unregister_reboot_notifier(&rionet_notifier);
613 struct net_device *ndev;
614 struct rionet_peer *peer, *tmp;
615 int i;
616
617 for (i = 0; i < RIONET_MAX_NETS; i++) {
618 if (nets[i].ndev != NULL) {
619 ndev = nets[i].ndev;
620 rnet = netdev_priv(ndev);
621 unregister_netdev(ndev);
622
623 list_for_each_entry_safe(peer,
624 tmp, &nets[i].peers, node) {
625 list_del(&peer->node);
626 kfree(peer);
627 }
628
629 free_pages((unsigned long)nets[i].active,
630 get_order(sizeof(void *) *
631 RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size)));
632 nets[i].active = NULL;
633
634 free_netdev(ndev);
635 }
636 }
637
638 subsys_interface_unregister(&rionet_interface); 758 subsys_interface_unregister(&rionet_interface);
759 class_interface_unregister(&rio_mport_interface);
639} 760}
640 761
641late_initcall(rionet_init); 762late_initcall(rionet_init);
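
Taken together, the rionet rework above moves doorbell request/release to peer add/remove time, sends LEAVE doorbells from a reboot notifier, tears the netdev down when the local mport goes away, and introduces nets[netid].lock so that every walk or update of the peers list and the active[] table is serialized against the doorbell handler. A kernel-style sketch of that locking rule, for illustration only (peer_table and peer_present() are invented names; the real code keys the table by RapidIO destid):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct peer_table {
	spinlock_t lock;		/* guards peers and any per-peer state */
	struct list_head peers;
};

struct peer {
	struct list_head node;
	u16 destid;
};

static bool peer_present(struct peer_table *t, u16 destid)
{
	struct peer *p;
	unsigned long flags;
	bool found = false;

	/* _irqsave because the doorbell callback may run in interrupt context */
	spin_lock_irqsave(&t->lock, flags);
	list_for_each_entry(p, &t->peers, node) {
		if (p->destid == destid) {
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&t->lock, flags);

	return found;
}
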
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 26c64d2782fa..a0f64cba86ba 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1198,6 +1198,9 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
1198 goto err_dev_open; 1198 goto err_dev_open;
1199 } 1199 }
1200 1200
1201 dev_uc_sync_multiple(port_dev, dev);
1202 dev_mc_sync_multiple(port_dev, dev);
1203
1201 err = vlan_vids_add_by_dev(port_dev, dev); 1204 err = vlan_vids_add_by_dev(port_dev, dev);
1202 if (err) { 1205 if (err) {
1203 netdev_err(dev, "Failed to add vlan ids to device %s\n", 1206 netdev_err(dev, "Failed to add vlan ids to device %s\n",
@@ -1261,6 +1264,8 @@ err_enable_netpoll:
1261 vlan_vids_del_by_dev(port_dev, dev); 1264 vlan_vids_del_by_dev(port_dev, dev);
1262 1265
1263err_vids_add: 1266err_vids_add:
1267 dev_uc_unsync(port_dev, dev);
1268 dev_mc_unsync(port_dev, dev);
1264 dev_close(port_dev); 1269 dev_close(port_dev);
1265 1270
1266err_dev_open: 1271err_dev_open:
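
The team change above keeps an enslaved port's address lists in step with its master: the master's unicast and multicast addresses are synced onto the port when it is added, and unsynced again on the failure path (and, symmetrically, when the port is later removed). A kernel-style sketch of the pairing, for illustration only (the helper names are invented):

#include <linux/netdevice.h>

static void port_addr_lists_sync(struct net_device *port_dev,
				 struct net_device *master_dev)
{
	/* push the master's current address lists down to the new port */
	dev_uc_sync_multiple(port_dev, master_dev);
	dev_mc_sync_multiple(port_dev, master_dev);
}

static void port_addr_lists_unsync(struct net_device *port_dev,
				   struct net_device *master_dev)
{
	/* undo the sync on error or on port removal */
	dev_uc_unsync(port_dev, master_dev);
	dev_mc_unsync(port_dev, master_dev);
}
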
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index afdf950617c3..a74661690a11 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -622,7 +622,9 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
622 622
623 /* Re-attach the filter to persist device */ 623 /* Re-attach the filter to persist device */
624 if (!skip_filter && (tun->filter_attached == true)) { 624 if (!skip_filter && (tun->filter_attached == true)) {
625 lock_sock(tfile->socket.sk);
625 err = sk_attach_filter(&tun->fprog, tfile->socket.sk); 626 err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
627 release_sock(tfile->socket.sk);
626 if (!err) 628 if (!err)
627 goto out; 629 goto out;
628 } 630 }
@@ -860,7 +862,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
860 goto drop; 862 goto drop;
861 863
862 if (skb->sk && sk_fullsock(skb->sk)) { 864 if (skb->sk && sk_fullsock(skb->sk)) {
863 sock_tx_timestamp(skb->sk, &skb_shinfo(skb)->tx_flags); 865 sock_tx_timestamp(skb->sk, skb->sk->sk_tsflags,
866 &skb_shinfo(skb)->tx_flags);
864 sw_tx_timestamp(skb); 867 sw_tx_timestamp(skb);
865 } 868 }
866 869
@@ -1014,7 +1017,6 @@ static void tun_net_init(struct net_device *dev)
1014 /* Zero header length */ 1017 /* Zero header length */
1015 dev->type = ARPHRD_NONE; 1018 dev->type = ARPHRD_NONE;
1016 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; 1019 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
1017 dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */
1018 break; 1020 break;
1019 1021
1020 case IFF_TAP: 1022 case IFF_TAP:
@@ -1026,7 +1028,6 @@ static void tun_net_init(struct net_device *dev)
1026 1028
1027 eth_hw_addr_random(dev); 1029 eth_hw_addr_random(dev);
1028 1030
1029 dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */
1030 break; 1031 break;
1031 } 1032 }
1032} 1033}
@@ -1480,6 +1481,8 @@ static void tun_setup(struct net_device *dev)
1480 1481
1481 dev->ethtool_ops = &tun_ethtool_ops; 1482 dev->ethtool_ops = &tun_ethtool_ops;
1482 dev->destructor = tun_free_netdev; 1483 dev->destructor = tun_free_netdev;
1484 /* We prefer our own queue length */
1485 dev->tx_queue_len = TUN_READQ_SIZE;
1483} 1486}
1484 1487
1485/* Trivial set of netlink ops to allow deleting tun or tap 1488/* Trivial set of netlink ops to allow deleting tun or tap
@@ -1822,7 +1825,9 @@ static void tun_detach_filter(struct tun_struct *tun, int n)
1822 1825
1823 for (i = 0; i < n; i++) { 1826 for (i = 0; i < n; i++) {
1824 tfile = rtnl_dereference(tun->tfiles[i]); 1827 tfile = rtnl_dereference(tun->tfiles[i]);
1828 lock_sock(tfile->socket.sk);
1825 sk_detach_filter(tfile->socket.sk); 1829 sk_detach_filter(tfile->socket.sk);
1830 release_sock(tfile->socket.sk);
1826 } 1831 }
1827 1832
1828 tun->filter_attached = false; 1833 tun->filter_attached = false;
@@ -1835,7 +1840,9 @@ static int tun_attach_filter(struct tun_struct *tun)
1835 1840
1836 for (i = 0; i < tun->numqueues; i++) { 1841 for (i = 0; i < tun->numqueues; i++) {
1837 tfile = rtnl_dereference(tun->tfiles[i]); 1842 tfile = rtnl_dereference(tun->tfiles[i]);
1843 lock_sock(tfile->socket.sk);
1838 ret = sk_attach_filter(&tun->fprog, tfile->socket.sk); 1844 ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
1845 release_sock(tfile->socket.sk);
1839 if (ret) { 1846 if (ret) {
1840 tun_detach_filter(tun, i); 1847 tun_detach_filter(tun, i);
1841 return ret; 1848 return ret;
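
The tun hunks above make three separate adjustments: sock_tx_timestamp() gains an explicit tsflags argument, the TUN_READQ_SIZE queue-length assignment is consolidated in tun_setup(), and every sk_attach_filter()/sk_detach_filter() call is now bracketed by lock_sock()/release_sock(), taking the socket lock around filter attach and detach. A kernel-style sketch of that last pattern, for illustration only (attach_filter_locked() is an invented name):

#include <linux/filter.h>
#include <net/sock.h>

static int attach_filter_locked(struct sock *sk, struct sock_fprog_kern *fprog)
{
	int err;

	lock_sock(sk);			/* serialize with other users of the socket */
	err = sk_attach_filter(fprog, sk);
	release_sock(sk);

	return err;
}
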
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 86ba30ba35e8..2fb31edab125 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1626,6 +1626,13 @@ static const struct usb_device_id cdc_devs[] = {
1626 .driver_info = (unsigned long) &wwan_info, 1626 .driver_info = (unsigned long) &wwan_info,
1627 }, 1627 },
1628 1628
1629 /* Telit LE910 V2 */
1630 { USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x0036,
1631 USB_CLASS_COMM,
1632 USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
1633 .driver_info = (unsigned long)&wwan_noarp_info,
1634 },
1635
1629 /* DW5812 LTE Verizon Mobile Broadband Card 1636 /* DW5812 LTE Verizon Mobile Broadband Card
1630 * Unlike DW5550 this device requires FLAG_NOARP 1637 * Unlike DW5550 this device requires FLAG_NOARP
1631 */ 1638 */
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index d36d5ebf37f3..f20890ee03f3 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -3261,54 +3261,6 @@ void lan78xx_tx_timeout(struct net_device *net)
3261 tasklet_schedule(&dev->bh); 3261 tasklet_schedule(&dev->bh);
3262} 3262}
3263 3263
3264struct rtnl_link_stats64 *lan78xx_get_stats64(struct net_device *netdev,
3265 struct rtnl_link_stats64 *storage)
3266{
3267 struct lan78xx_net *dev = netdev_priv(netdev);
3268 struct lan78xx_statstage64 stats;
3269
3270 /* curr_stat is updated by timer.
3271 * periodic reading from HW will prevent from entering USB auto suspend.
3272 * if autosuspend is disabled, read from HW.
3273 */
3274 if (!dev->udev->dev.power.runtime_auto)
3275 lan78xx_update_stats(dev);
3276
3277 mutex_lock(&dev->stats.access_lock);
3278 memcpy(&stats, &dev->stats.curr_stat, sizeof(stats));
3279 mutex_unlock(&dev->stats.access_lock);
3280
3281 /* calc by driver */
3282 storage->rx_packets = (__u64)netdev->stats.rx_packets;
3283 storage->tx_packets = (__u64)netdev->stats.tx_packets;
3284 storage->rx_bytes = (__u64)netdev->stats.rx_bytes;
3285 storage->tx_bytes = (__u64)netdev->stats.tx_bytes;
3286
3287 /* use counter */
3288 storage->rx_length_errors = stats.rx_undersize_frame_errors +
3289 stats.rx_oversize_frame_errors;
3290 storage->rx_crc_errors = stats.rx_fcs_errors;
3291 storage->rx_frame_errors = stats.rx_alignment_errors;
3292 storage->rx_fifo_errors = stats.rx_dropped_frames;
3293 storage->rx_over_errors = stats.rx_oversize_frame_errors;
3294 storage->rx_errors = stats.rx_fcs_errors +
3295 stats.rx_alignment_errors +
3296 stats.rx_fragment_errors +
3297 stats.rx_jabber_errors +
3298 stats.rx_undersize_frame_errors +
3299 stats.rx_oversize_frame_errors +
3300 stats.rx_dropped_frames;
3301
3302 storage->tx_carrier_errors = stats.tx_carrier_errors;
3303 storage->tx_errors = stats.tx_fcs_errors +
3304 stats.tx_excess_deferral_errors +
3305 stats.tx_carrier_errors;
3306
3307 storage->multicast = stats.rx_multicast_frames;
3308
3309 return storage;
3310}
3311
3312static const struct net_device_ops lan78xx_netdev_ops = { 3264static const struct net_device_ops lan78xx_netdev_ops = {
3313 .ndo_open = lan78xx_open, 3265 .ndo_open = lan78xx_open,
3314 .ndo_stop = lan78xx_stop, 3266 .ndo_stop = lan78xx_stop,
@@ -3322,7 +3274,6 @@ static const struct net_device_ops lan78xx_netdev_ops = {
3322 .ndo_set_features = lan78xx_set_features, 3274 .ndo_set_features = lan78xx_set_features,
3323 .ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid, 3275 .ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
3324 .ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid, 3276 .ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
3325 .ndo_get_stats64 = lan78xx_get_stats64,
3326}; 3277};
3327 3278
3328static void lan78xx_stat_monitor(unsigned long param) 3279static void lan78xx_stat_monitor(unsigned long param)
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 1bfe0fcaccf5..22e1a9a99a7d 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -38,7 +38,7 @@
38 * HEADS UP: this handshaking isn't all that robust. This driver 38 * HEADS UP: this handshaking isn't all that robust. This driver
39 * gets confused easily if you unplug one end of the cable then 39 * gets confused easily if you unplug one end of the cable then
40 * try to connect it again; you'll need to restart both ends. The 40 * try to connect it again; you'll need to restart both ends. The
41 * "naplink" software (used by some PlayStation/2 deveopers) does 41 * "naplink" software (used by some PlayStation/2 developers) does
42 * the handshaking much better! Also, sometimes this hardware 42 * the handshaking much better! Also, sometimes this hardware
43 * seems to get wedged under load. Prolific docs are weak, and 43 * seems to get wedged under load. Prolific docs are weak, and
44 * don't identify differences between PL2301 and PL2302, much less 44 * don't identify differences between PL2301 and PL2302, much less
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 7d717c66bcb0..9d1fce8a6e84 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -844,6 +844,7 @@ static const struct usb_device_id products[] = {
844 {QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */ 844 {QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */
845 {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */ 845 {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
846 {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ 846 {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
847 {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
847 {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ 848 {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
848 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ 849 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
849 {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ 850 {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index fb0eae42bf39..49d84e540343 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -260,7 +260,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
260 p = page_address(page) + offset; 260 p = page_address(page) + offset;
261 261
262 /* copy small packet so we can reuse these pages for small data */ 262 /* copy small packet so we can reuse these pages for small data */
263 skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN); 263 skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
264 if (unlikely(!skb)) 264 if (unlikely(!skb))
265 return NULL; 265 return NULL;
266 266
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 800106a7246c..9f3634064c92 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -551,16 +551,15 @@ static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
551 return vh; 551 return vh;
552} 552}
553 553
554static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, 554static struct sk_buff **vxlan_gro_receive(struct sock *sk,
555 struct sk_buff *skb, 555 struct sk_buff **head,
556 struct udp_offload *uoff) 556 struct sk_buff *skb)
557{ 557{
558 struct sk_buff *p, **pp = NULL; 558 struct sk_buff *p, **pp = NULL;
559 struct vxlanhdr *vh, *vh2; 559 struct vxlanhdr *vh, *vh2;
560 unsigned int hlen, off_vx; 560 unsigned int hlen, off_vx;
561 int flush = 1; 561 int flush = 1;
562 struct vxlan_sock *vs = container_of(uoff, struct vxlan_sock, 562 struct vxlan_sock *vs = rcu_dereference_sk_user_data(sk);
563 udp_offloads);
564 __be32 flags; 563 __be32 flags;
565 struct gro_remcsum grc; 564 struct gro_remcsum grc;
566 565
@@ -613,8 +612,7 @@ out:
613 return pp; 612 return pp;
614} 613}
615 614
616static int vxlan_gro_complete(struct sk_buff *skb, int nhoff, 615static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
617 struct udp_offload *uoff)
618{ 616{
619 udp_tunnel_gro_complete(skb, nhoff); 617 udp_tunnel_gro_complete(skb, nhoff);
620 618
@@ -629,13 +627,6 @@ static void vxlan_notify_add_rx_port(struct vxlan_sock *vs)
629 struct net *net = sock_net(sk); 627 struct net *net = sock_net(sk);
630 sa_family_t sa_family = vxlan_get_sk_family(vs); 628 sa_family_t sa_family = vxlan_get_sk_family(vs);
631 __be16 port = inet_sk(sk)->inet_sport; 629 __be16 port = inet_sk(sk)->inet_sport;
632 int err;
633
634 if (sa_family == AF_INET) {
635 err = udp_add_offload(net, &vs->udp_offloads);
636 if (err)
637 pr_warn("vxlan: udp_add_offload failed with status %d\n", err);
638 }
639 630
640 rcu_read_lock(); 631 rcu_read_lock();
641 for_each_netdev_rcu(net, dev) { 632 for_each_netdev_rcu(net, dev) {
@@ -662,9 +653,6 @@ static void vxlan_notify_del_rx_port(struct vxlan_sock *vs)
662 port); 653 port);
663 } 654 }
664 rcu_read_unlock(); 655 rcu_read_unlock();
665
666 if (sa_family == AF_INET)
667 udp_del_offload(&vs->udp_offloads);
668} 656}
669 657
670/* Add new entry to forwarding table -- assumes lock held */ 658/* Add new entry to forwarding table -- assumes lock held */
@@ -1143,7 +1131,7 @@ static int vxlan_igmp_leave(struct vxlan_dev *vxlan)
1143static bool vxlan_remcsum(struct vxlanhdr *unparsed, 1131static bool vxlan_remcsum(struct vxlanhdr *unparsed,
1144 struct sk_buff *skb, u32 vxflags) 1132 struct sk_buff *skb, u32 vxflags)
1145{ 1133{
1146 size_t start, offset, plen; 1134 size_t start, offset;
1147 1135
1148 if (!(unparsed->vx_flags & VXLAN_HF_RCO) || skb->remcsum_offload) 1136 if (!(unparsed->vx_flags & VXLAN_HF_RCO) || skb->remcsum_offload)
1149 goto out; 1137 goto out;
@@ -1151,9 +1139,7 @@ static bool vxlan_remcsum(struct vxlanhdr *unparsed,
1151 start = vxlan_rco_start(unparsed->vx_vni); 1139 start = vxlan_rco_start(unparsed->vx_vni);
1152 offset = start + vxlan_rco_offset(unparsed->vx_vni); 1140 offset = start + vxlan_rco_offset(unparsed->vx_vni);
1153 1141
1154 plen = sizeof(struct vxlanhdr) + offset + sizeof(u16); 1142 if (!pskb_may_pull(skb, offset + sizeof(u16)))
1155
1156 if (!pskb_may_pull(skb, plen))
1157 return false; 1143 return false;
1158 1144
1159 skb_remcsum_process(skb, (void *)(vxlan_hdr(skb) + 1), start, offset, 1145 skb_remcsum_process(skb, (void *)(vxlan_hdr(skb) + 1), start, offset,
@@ -1194,6 +1180,45 @@ out:
1194 unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS; 1180 unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS;
1195} 1181}
1196 1182
1183static bool vxlan_parse_gpe_hdr(struct vxlanhdr *unparsed,
1184 __be32 *protocol,
1185 struct sk_buff *skb, u32 vxflags)
1186{
1187 struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)unparsed;
1188
1189 /* Need to have Next Protocol set for interfaces in GPE mode. */
1190 if (!gpe->np_applied)
1191 return false;
1192 /* "The initial version is 0. If a receiver does not support the
1193 * version indicated it MUST drop the packet.
1194 */
1195 if (gpe->version != 0)
1196 return false;
1197 /* "When the O bit is set to 1, the packet is an OAM packet and OAM
1198 * processing MUST occur." However, we don't implement OAM
1199 * processing, thus drop the packet.
1200 */
1201 if (gpe->oam_flag)
1202 return false;
1203
1204 switch (gpe->next_protocol) {
1205 case VXLAN_GPE_NP_IPV4:
1206 *protocol = htons(ETH_P_IP);
1207 break;
1208 case VXLAN_GPE_NP_IPV6:
1209 *protocol = htons(ETH_P_IPV6);
1210 break;
1211 case VXLAN_GPE_NP_ETHERNET:
1212 *protocol = htons(ETH_P_TEB);
1213 break;
1214 default:
1215 return false;
1216 }
1217
1218 unparsed->vx_flags &= ~VXLAN_GPE_USED_BITS;
1219 return true;
1220}
1221
1197static bool vxlan_set_mac(struct vxlan_dev *vxlan, 1222static bool vxlan_set_mac(struct vxlan_dev *vxlan,
1198 struct vxlan_sock *vs, 1223 struct vxlan_sock *vs,
1199 struct sk_buff *skb) 1224 struct sk_buff *skb)
@@ -1259,9 +1284,11 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
1259 struct vxlanhdr unparsed; 1284 struct vxlanhdr unparsed;
1260 struct vxlan_metadata _md; 1285 struct vxlan_metadata _md;
1261 struct vxlan_metadata *md = &_md; 1286 struct vxlan_metadata *md = &_md;
1287 __be32 protocol = htons(ETH_P_TEB);
1288 bool raw_proto = false;
1262 void *oiph; 1289 void *oiph;
1263 1290
1264 /* Need Vxlan and inner Ethernet header to be present */ 1291 /* Need UDP and VXLAN header to be present */
1265 if (!pskb_may_pull(skb, VXLAN_HLEN)) 1292 if (!pskb_may_pull(skb, VXLAN_HLEN))
1266 return 1; 1293 return 1;
1267 1294
@@ -1285,9 +1312,18 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
1285 if (!vxlan) 1312 if (!vxlan)
1286 goto drop; 1313 goto drop;
1287 1314
1288 if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB), 1315 /* For backwards compatibility, only allow reserved fields to be
1289 !net_eq(vxlan->net, dev_net(vxlan->dev)))) 1316 * used by VXLAN extensions if explicitly requested.
1290 goto drop; 1317 */
1318 if (vs->flags & VXLAN_F_GPE) {
1319 if (!vxlan_parse_gpe_hdr(&unparsed, &protocol, skb, vs->flags))
1320 goto drop;
1321 raw_proto = true;
1322 }
1323
1324 if (__iptunnel_pull_header(skb, VXLAN_HLEN, protocol, raw_proto,
1325 !net_eq(vxlan->net, dev_net(vxlan->dev))))
1326 goto drop;
1291 1327
1292 if (vxlan_collect_metadata(vs)) { 1328 if (vxlan_collect_metadata(vs)) {
1293 __be32 vni = vxlan_vni(vxlan_hdr(skb)->vx_vni); 1329 __be32 vni = vxlan_vni(vxlan_hdr(skb)->vx_vni);
@@ -1306,14 +1342,14 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
1306 memset(md, 0, sizeof(*md)); 1342 memset(md, 0, sizeof(*md));
1307 } 1343 }
1308 1344
1309 /* For backwards compatibility, only allow reserved fields to be
1310 * used by VXLAN extensions if explicitly requested.
1311 */
1312 if (vs->flags & VXLAN_F_REMCSUM_RX) 1345 if (vs->flags & VXLAN_F_REMCSUM_RX)
1313 if (!vxlan_remcsum(&unparsed, skb, vs->flags)) 1346 if (!vxlan_remcsum(&unparsed, skb, vs->flags))
1314 goto drop; 1347 goto drop;
1315 if (vs->flags & VXLAN_F_GBP) 1348 if (vs->flags & VXLAN_F_GBP)
1316 vxlan_parse_gbp_hdr(&unparsed, skb, vs->flags, md); 1349 vxlan_parse_gbp_hdr(&unparsed, skb, vs->flags, md);
1350 /* Note that GBP and GPE can never be active together. This is
1351 * ensured in vxlan_dev_configure.
1352 */
1317 1353
1318 if (unparsed.vx_flags || unparsed.vx_vni) { 1354 if (unparsed.vx_flags || unparsed.vx_vni) {
1319 /* If there are any unprocessed flags remaining treat 1355 /* If there are any unprocessed flags remaining treat
@@ -1327,8 +1363,13 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
1327 goto drop; 1363 goto drop;
1328 } 1364 }
1329 1365
1330 if (!vxlan_set_mac(vxlan, vs, skb)) 1366 if (!raw_proto) {
1331 goto drop; 1367 if (!vxlan_set_mac(vxlan, vs, skb))
1368 goto drop;
1369 } else {
1370 skb->dev = vxlan->dev;
1371 skb->pkt_type = PACKET_HOST;
1372 }
1332 1373
1333 oiph = skb_network_header(skb); 1374 oiph = skb_network_header(skb);
1334 skb_reset_network_header(skb); 1375 skb_reset_network_header(skb);
@@ -1687,6 +1728,27 @@ static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags,
1687 gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK); 1728 gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK);
1688} 1729}
1689 1730
1731static int vxlan_build_gpe_hdr(struct vxlanhdr *vxh, u32 vxflags,
1732 __be16 protocol)
1733{
1734 struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)vxh;
1735
1736 gpe->np_applied = 1;
1737
1738 switch (protocol) {
1739 case htons(ETH_P_IP):
1740 gpe->next_protocol = VXLAN_GPE_NP_IPV4;
1741 return 0;
1742 case htons(ETH_P_IPV6):
1743 gpe->next_protocol = VXLAN_GPE_NP_IPV6;
1744 return 0;
1745 case htons(ETH_P_TEB):
1746 gpe->next_protocol = VXLAN_GPE_NP_ETHERNET;
1747 return 0;
1748 }
1749 return -EPFNOSUPPORT;
1750}
1751
1690static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst, 1752static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
1691 int iphdr_len, __be32 vni, 1753 int iphdr_len, __be32 vni,
1692 struct vxlan_metadata *md, u32 vxflags, 1754 struct vxlan_metadata *md, u32 vxflags,
@@ -1696,6 +1758,7 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
1696 int min_headroom; 1758 int min_headroom;
1697 int err; 1759 int err;
1698 int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; 1760 int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
1761 __be16 inner_protocol = htons(ETH_P_TEB);
1699 1762
1700 if ((vxflags & VXLAN_F_REMCSUM_TX) && 1763 if ((vxflags & VXLAN_F_REMCSUM_TX) &&
1701 skb->ip_summed == CHECKSUM_PARTIAL) { 1764 skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -1714,10 +1777,8 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
1714 1777
1715 /* Need space for new headers (invalidates iph ptr) */ 1778 /* Need space for new headers (invalidates iph ptr) */
1716 err = skb_cow_head(skb, min_headroom); 1779 err = skb_cow_head(skb, min_headroom);
1717 if (unlikely(err)) { 1780 if (unlikely(err))
1718 kfree_skb(skb); 1781 goto out_free;
1719 return err;
1720 }
1721 1782
1722 skb = vlan_hwaccel_push_inside(skb); 1783 skb = vlan_hwaccel_push_inside(skb);
1723 if (WARN_ON(!skb)) 1784 if (WARN_ON(!skb))
@@ -1746,9 +1807,19 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
1746 1807
1747 if (vxflags & VXLAN_F_GBP) 1808 if (vxflags & VXLAN_F_GBP)
1748 vxlan_build_gbp_hdr(vxh, vxflags, md); 1809 vxlan_build_gbp_hdr(vxh, vxflags, md);
1810 if (vxflags & VXLAN_F_GPE) {
1811 err = vxlan_build_gpe_hdr(vxh, vxflags, skb->protocol);
1812 if (err < 0)
1813 goto out_free;
1814 inner_protocol = skb->protocol;
1815 }
1749 1816
1750 skb_set_inner_protocol(skb, htons(ETH_P_TEB)); 1817 skb_set_inner_protocol(skb, inner_protocol);
1751 return 0; 1818 return 0;
1819
1820out_free:
1821 kfree_skb(skb);
1822 return err;
1752} 1823}
1753 1824
1754static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, 1825static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan,
@@ -1810,10 +1881,9 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
1810 1881
1811 memset(&fl6, 0, sizeof(fl6)); 1882 memset(&fl6, 0, sizeof(fl6));
1812 fl6.flowi6_oif = oif; 1883 fl6.flowi6_oif = oif;
1813 fl6.flowi6_tos = RT_TOS(tos);
1814 fl6.daddr = *daddr; 1884 fl6.daddr = *daddr;
1815 fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr; 1885 fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr;
1816 fl6.flowlabel = label; 1886 fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label);
1817 fl6.flowi6_mark = skb->mark; 1887 fl6.flowi6_mark = skb->mark;
1818 fl6.flowi6_proto = IPPROTO_UDP; 1888 fl6.flowi6_proto = IPPROTO_UDP;
1819 1889
@@ -2109,9 +2179,17 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
2109 info = skb_tunnel_info(skb); 2179 info = skb_tunnel_info(skb);
2110 2180
2111 skb_reset_mac_header(skb); 2181 skb_reset_mac_header(skb);
2112 eth = eth_hdr(skb);
2113 2182
2114 if ((vxlan->flags & VXLAN_F_PROXY)) { 2183 if (vxlan->flags & VXLAN_F_COLLECT_METADATA) {
2184 if (info && info->mode & IP_TUNNEL_INFO_TX)
2185 vxlan_xmit_one(skb, dev, NULL, false);
2186 else
2187 kfree_skb(skb);
2188 return NETDEV_TX_OK;
2189 }
2190
2191 if (vxlan->flags & VXLAN_F_PROXY) {
2192 eth = eth_hdr(skb);
2115 if (ntohs(eth->h_proto) == ETH_P_ARP) 2193 if (ntohs(eth->h_proto) == ETH_P_ARP)
2116 return arp_reduce(dev, skb); 2194 return arp_reduce(dev, skb);
2117#if IS_ENABLED(CONFIG_IPV6) 2195#if IS_ENABLED(CONFIG_IPV6)
@@ -2126,18 +2204,10 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
2126 msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) 2204 msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
2127 return neigh_reduce(dev, skb); 2205 return neigh_reduce(dev, skb);
2128 } 2206 }
2129 eth = eth_hdr(skb);
2130#endif 2207#endif
2131 } 2208 }
2132 2209
2133 if (vxlan->flags & VXLAN_F_COLLECT_METADATA) { 2210 eth = eth_hdr(skb);
2134 if (info && info->mode & IP_TUNNEL_INFO_TX)
2135 vxlan_xmit_one(skb, dev, NULL, false);
2136 else
2137 kfree_skb(skb);
2138 return NETDEV_TX_OK;
2139 }
2140
2141 f = vxlan_find_mac(vxlan, eth->h_dest); 2211 f = vxlan_find_mac(vxlan, eth->h_dest);
2142 did_rsc = false; 2212 did_rsc = false;
2143 2213
@@ -2407,7 +2477,7 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
2407 return 0; 2477 return 0;
2408} 2478}
2409 2479
2410static const struct net_device_ops vxlan_netdev_ops = { 2480static const struct net_device_ops vxlan_netdev_ether_ops = {
2411 .ndo_init = vxlan_init, 2481 .ndo_init = vxlan_init,
2412 .ndo_uninit = vxlan_uninit, 2482 .ndo_uninit = vxlan_uninit,
2413 .ndo_open = vxlan_open, 2483 .ndo_open = vxlan_open,
@@ -2424,6 +2494,17 @@ static const struct net_device_ops vxlan_netdev_ops = {
2424 .ndo_fill_metadata_dst = vxlan_fill_metadata_dst, 2494 .ndo_fill_metadata_dst = vxlan_fill_metadata_dst,
2425}; 2495};
2426 2496
2497static const struct net_device_ops vxlan_netdev_raw_ops = {
2498 .ndo_init = vxlan_init,
2499 .ndo_uninit = vxlan_uninit,
2500 .ndo_open = vxlan_open,
2501 .ndo_stop = vxlan_stop,
2502 .ndo_start_xmit = vxlan_xmit,
2503 .ndo_get_stats64 = ip_tunnel_get_stats64,
2504 .ndo_change_mtu = vxlan_change_mtu,
2505 .ndo_fill_metadata_dst = vxlan_fill_metadata_dst,
2506};
2507
2427/* Info for udev, that this is a virtual tunnel endpoint */ 2508/* Info for udev, that this is a virtual tunnel endpoint */
2428static struct device_type vxlan_type = { 2509static struct device_type vxlan_type = {
2429 .name = "vxlan", 2510 .name = "vxlan",
@@ -2461,10 +2542,6 @@ static void vxlan_setup(struct net_device *dev)
2461 struct vxlan_dev *vxlan = netdev_priv(dev); 2542 struct vxlan_dev *vxlan = netdev_priv(dev);
2462 unsigned int h; 2543 unsigned int h;
2463 2544
2464 eth_hw_addr_random(dev);
2465 ether_setup(dev);
2466
2467 dev->netdev_ops = &vxlan_netdev_ops;
2468 dev->destructor = free_netdev; 2545 dev->destructor = free_netdev;
2469 SET_NETDEV_DEVTYPE(dev, &vxlan_type); 2546 SET_NETDEV_DEVTYPE(dev, &vxlan_type);
2470 2547
@@ -2479,8 +2556,7 @@ static void vxlan_setup(struct net_device *dev)
2479 dev->hw_features |= NETIF_F_GSO_SOFTWARE; 2556 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
2480 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; 2557 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
2481 netif_keep_dst(dev); 2558 netif_keep_dst(dev);
2482 dev->priv_flags &= ~IFF_TX_SKB_SHARING; 2559 dev->priv_flags |= IFF_NO_QUEUE;
2483 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
2484 2560
2485 INIT_LIST_HEAD(&vxlan->next); 2561 INIT_LIST_HEAD(&vxlan->next);
2486 spin_lock_init(&vxlan->hash_lock); 2562 spin_lock_init(&vxlan->hash_lock);
@@ -2499,6 +2575,26 @@ static void vxlan_setup(struct net_device *dev)
2499 INIT_HLIST_HEAD(&vxlan->fdb_head[h]); 2575 INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
2500} 2576}
2501 2577
2578static void vxlan_ether_setup(struct net_device *dev)
2579{
2580 eth_hw_addr_random(dev);
2581 ether_setup(dev);
2582 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
2583 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
2584 dev->netdev_ops = &vxlan_netdev_ether_ops;
2585}
2586
2587static void vxlan_raw_setup(struct net_device *dev)
2588{
2589 dev->type = ARPHRD_NONE;
2590 dev->hard_header_len = 0;
2591 dev->addr_len = 0;
2592 dev->mtu = ETH_DATA_LEN;
2593 dev->tx_queue_len = 1000;
2594 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
2595 dev->netdev_ops = &vxlan_netdev_raw_ops;
2596}
2597
2502static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = { 2598static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
2503 [IFLA_VXLAN_ID] = { .type = NLA_U32 }, 2599 [IFLA_VXLAN_ID] = { .type = NLA_U32 },
2504 [IFLA_VXLAN_GROUP] = { .len = FIELD_SIZEOF(struct iphdr, daddr) }, 2600 [IFLA_VXLAN_GROUP] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
@@ -2525,6 +2621,7 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
2525 [IFLA_VXLAN_REMCSUM_TX] = { .type = NLA_U8 }, 2621 [IFLA_VXLAN_REMCSUM_TX] = { .type = NLA_U8 },
2526 [IFLA_VXLAN_REMCSUM_RX] = { .type = NLA_U8 }, 2622 [IFLA_VXLAN_REMCSUM_RX] = { .type = NLA_U8 },
2527 [IFLA_VXLAN_GBP] = { .type = NLA_FLAG, }, 2623 [IFLA_VXLAN_GBP] = { .type = NLA_FLAG, },
2624 [IFLA_VXLAN_GPE] = { .type = NLA_FLAG, },
2528 [IFLA_VXLAN_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG }, 2625 [IFLA_VXLAN_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG },
2529}; 2626};
2530 2627
@@ -2643,21 +2740,19 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
2643 atomic_set(&vs->refcnt, 1); 2740 atomic_set(&vs->refcnt, 1);
2644 vs->flags = (flags & VXLAN_F_RCV_FLAGS); 2741 vs->flags = (flags & VXLAN_F_RCV_FLAGS);
2645 2742
2646 /* Initialize the vxlan udp offloads structure */
2647 vs->udp_offloads.port = port;
2648 vs->udp_offloads.callbacks.gro_receive = vxlan_gro_receive;
2649 vs->udp_offloads.callbacks.gro_complete = vxlan_gro_complete;
2650
2651 spin_lock(&vn->sock_lock); 2743 spin_lock(&vn->sock_lock);
2652 hlist_add_head_rcu(&vs->hlist, vs_head(net, port)); 2744 hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
2653 vxlan_notify_add_rx_port(vs); 2745 vxlan_notify_add_rx_port(vs);
2654 spin_unlock(&vn->sock_lock); 2746 spin_unlock(&vn->sock_lock);
2655 2747
2656 /* Mark socket as an encapsulation socket. */ 2748 /* Mark socket as an encapsulation socket. */
2749 memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
2657 tunnel_cfg.sk_user_data = vs; 2750 tunnel_cfg.sk_user_data = vs;
2658 tunnel_cfg.encap_type = 1; 2751 tunnel_cfg.encap_type = 1;
2659 tunnel_cfg.encap_rcv = vxlan_rcv; 2752 tunnel_cfg.encap_rcv = vxlan_rcv;
2660 tunnel_cfg.encap_destroy = NULL; 2753 tunnel_cfg.encap_destroy = NULL;
2754 tunnel_cfg.gro_receive = vxlan_gro_receive;
2755 tunnel_cfg.gro_complete = vxlan_gro_complete;
2661 2756
2662 setup_udp_tunnel_sock(net, sock, &tunnel_cfg); 2757 setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
2663 2758
@@ -2725,6 +2820,21 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
2725 __be16 default_port = vxlan->cfg.dst_port; 2820 __be16 default_port = vxlan->cfg.dst_port;
2726 struct net_device *lowerdev = NULL; 2821 struct net_device *lowerdev = NULL;
2727 2822
2823 if (conf->flags & VXLAN_F_GPE) {
2824 if (conf->flags & ~VXLAN_F_ALLOWED_GPE)
2825 return -EINVAL;
2826 /* For now, allow GPE only together with COLLECT_METADATA.
2827 * This can be relaxed later; in such case, the other side
2828 * of the PtP link will have to be provided.
2829 */
2830 if (!(conf->flags & VXLAN_F_COLLECT_METADATA))
2831 return -EINVAL;
2832
2833 vxlan_raw_setup(dev);
2834 } else {
2835 vxlan_ether_setup(dev);
2836 }
2837
2728 vxlan->net = src_net; 2838 vxlan->net = src_net;
2729 2839
2730 dst->remote_vni = conf->vni; 2840 dst->remote_vni = conf->vni;
@@ -2786,8 +2896,12 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
2786 dev->needed_headroom = needed_headroom; 2896 dev->needed_headroom = needed_headroom;
2787 2897
2788 memcpy(&vxlan->cfg, conf, sizeof(*conf)); 2898 memcpy(&vxlan->cfg, conf, sizeof(*conf));
2789 if (!vxlan->cfg.dst_port) 2899 if (!vxlan->cfg.dst_port) {
2790 vxlan->cfg.dst_port = default_port; 2900 if (conf->flags & VXLAN_F_GPE)
2901 vxlan->cfg.dst_port = 4790; /* IANA assigned VXLAN-GPE port */
2902 else
2903 vxlan->cfg.dst_port = default_port;
2904 }
2791 vxlan->flags |= conf->flags; 2905 vxlan->flags |= conf->flags;
2792 2906
2793 if (!vxlan->cfg.age_interval) 2907 if (!vxlan->cfg.age_interval)
@@ -2958,6 +3072,9 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
2958 if (data[IFLA_VXLAN_GBP]) 3072 if (data[IFLA_VXLAN_GBP])
2959 conf.flags |= VXLAN_F_GBP; 3073 conf.flags |= VXLAN_F_GBP;
2960 3074
3075 if (data[IFLA_VXLAN_GPE])
3076 conf.flags |= VXLAN_F_GPE;
3077
2961 if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL]) 3078 if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL])
2962 conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL; 3079 conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL;
2963 3080
@@ -2974,6 +3091,10 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
2974 case -EEXIST: 3091 case -EEXIST:
2975 pr_info("duplicate VNI %u\n", be32_to_cpu(conf.vni)); 3092 pr_info("duplicate VNI %u\n", be32_to_cpu(conf.vni));
2976 break; 3093 break;
3094
3095 case -EINVAL:
3096 pr_info("unsupported combination of extensions\n");
3097 break;
2977 } 3098 }
2978 3099
2979 return err; 3100 return err;
@@ -3101,6 +3222,10 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
3101 nla_put_flag(skb, IFLA_VXLAN_GBP)) 3222 nla_put_flag(skb, IFLA_VXLAN_GBP))
3102 goto nla_put_failure; 3223 goto nla_put_failure;
3103 3224
3225 if (vxlan->flags & VXLAN_F_GPE &&
3226 nla_put_flag(skb, IFLA_VXLAN_GPE))
3227 goto nla_put_failure;
3228
3104 if (vxlan->flags & VXLAN_F_REMCSUM_NOPARTIAL && 3229 if (vxlan->flags & VXLAN_F_REMCSUM_NOPARTIAL &&
3105 nla_put_flag(skb, IFLA_VXLAN_REMCSUM_NOPARTIAL)) 3230 nla_put_flag(skb, IFLA_VXLAN_REMCSUM_NOPARTIAL))
3106 goto nla_put_failure; 3231 goto nla_put_failure;
diff --git a/drivers/net/wireless/admtek/adm8211.c b/drivers/net/wireless/admtek/adm8211.c
index 15f057ed41ad..70ecd82d674d 100644
--- a/drivers/net/wireless/admtek/adm8211.c
+++ b/drivers/net/wireless/admtek/adm8211.c
@@ -440,7 +440,7 @@ static void adm8211_interrupt_rci(struct ieee80211_hw *dev)
440 rx_status.rate_idx = rate; 440 rx_status.rate_idx = rate;
441 441
442 rx_status.freq = adm8211_channels[priv->channel - 1].center_freq; 442 rx_status.freq = adm8211_channels[priv->channel - 1].center_freq;
443 rx_status.band = IEEE80211_BAND_2GHZ; 443 rx_status.band = NL80211_BAND_2GHZ;
444 444
445 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); 445 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
446 ieee80211_rx_irqsafe(dev, skb); 446 ieee80211_rx_irqsafe(dev, skb);
@@ -1894,7 +1894,7 @@ static int adm8211_probe(struct pci_dev *pdev,
1894 1894
1895 priv->channel = 1; 1895 priv->channel = 1;
1896 1896
1897 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band; 1897 dev->wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
1898 1898
1899 err = ieee80211_register_hw(dev); 1899 err = ieee80211_register_hw(dev);
1900 if (err) { 1900 if (err) {
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 3b343c63aa52..8aded24bcdf4 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1471,12 +1471,12 @@ static int ar5523_init_modes(struct ar5523 *ar)
1471 memcpy(ar->channels, ar5523_channels, sizeof(ar5523_channels)); 1471 memcpy(ar->channels, ar5523_channels, sizeof(ar5523_channels));
1472 memcpy(ar->rates, ar5523_rates, sizeof(ar5523_rates)); 1472 memcpy(ar->rates, ar5523_rates, sizeof(ar5523_rates));
1473 1473
1474 ar->band.band = IEEE80211_BAND_2GHZ; 1474 ar->band.band = NL80211_BAND_2GHZ;
1475 ar->band.channels = ar->channels; 1475 ar->band.channels = ar->channels;
1476 ar->band.n_channels = ARRAY_SIZE(ar5523_channels); 1476 ar->band.n_channels = ARRAY_SIZE(ar5523_channels);
1477 ar->band.bitrates = ar->rates; 1477 ar->band.bitrates = ar->rates;
1478 ar->band.n_bitrates = ARRAY_SIZE(ar5523_rates); 1478 ar->band.n_bitrates = ARRAY_SIZE(ar5523_rates);
1479 ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &ar->band; 1479 ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = &ar->band;
1480 return 0; 1480 return 0;
1481} 1481}
1482 1482
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 65ef483ebf50..da7a7c8dafb2 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -185,7 +185,7 @@ struct ath_common {
185 bool bt_ant_diversity; 185 bool bt_ant_diversity;
186 186
187 int last_rssi; 187 int last_rssi;
188 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS]; 188 struct ieee80211_supported_band sbands[NUM_NL80211_BANDS];
189}; 189};
190 190
191static inline const struct ath_ps_ops *ath_ps_ops(struct ath_common *common) 191static inline const struct ath_ps_ops *ath_ps_ops(struct ath_common *common)
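The ieee80211_band to nl80211_band rename seen in these wireless hunks is purely mechanical: the enumerators map one-to-one (2 GHz, 5 GHz, 60 GHz) and the array bound IEEE80211_NUM_BANDS becomes NUM_NL80211_BANDS, so per-band tables keep their layout. The mimic below assumes the usual ordering of the kernel enums of this period; the types are local stand-ins, not the real nl80211/mac80211 headers.

/*
 * Minimal mimic showing why sizing and indexing per-band arrays is
 * unchanged by the enum switch (assumed values: 2 GHz = 0, 5 GHz = 1,
 * 60 GHz = 2).
 */
#include <assert.h>
#include <stdio.h>

enum band_mimic {
	BAND_2GHZ,	/* NL80211_BAND_2GHZ */
	BAND_5GHZ,	/* NL80211_BAND_5GHZ */
	BAND_60GHZ,	/* NL80211_BAND_60GHZ */
	NUM_BANDS	/* NUM_NL80211_BANDS: sizes per-band arrays */
};

struct supported_band_mimic {
	int n_channels;
};

int main(void)
{
	/* Drivers index per-band tables directly with the enum value. */
	struct supported_band_mimic sbands[NUM_BANDS] = { {0} };

	sbands[BAND_2GHZ].n_channels = 14;
	assert(NUM_BANDS == 3);
	printf("2 GHz channels: %d\n", sbands[BAND_2GHZ].n_channels);
	return 0;
}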
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index edf3629288bc..7212802eb327 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -411,7 +411,8 @@ int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
411 411
412 lockdep_assert_held(&ar_pci->ce_lock); 412 lockdep_assert_held(&ar_pci->ce_lock);
413 413
414 if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0) 414 if ((pipe->id != 5) &&
415 CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
415 return -ENOSPC; 416 return -ENOSPC;
416 417
417 desc->addr = __cpu_to_le32(paddr); 418 desc->addr = __cpu_to_le32(paddr);
@@ -425,6 +426,19 @@ int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
425 return 0; 426 return 0;
426} 427}
427 428
429void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
430{
431 struct ath10k *ar = pipe->ar;
432 struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
433 unsigned int nentries_mask = dest_ring->nentries_mask;
434 unsigned int write_index = dest_ring->write_index;
435 u32 ctrl_addr = pipe->ctrl_addr;
436
437 write_index = CE_RING_IDX_ADD(nentries_mask, write_index, nentries);
438 ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
439 dest_ring->write_index = write_index;
440}
441
428int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr) 442int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
429{ 443{
430 struct ath10k *ar = pipe->ar; 444 struct ath10k *ar = pipe->ar;
@@ -444,14 +458,10 @@ int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
444 */ 458 */
445int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state, 459int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
446 void **per_transfer_contextp, 460 void **per_transfer_contextp,
447 u32 *bufferp, 461 unsigned int *nbytesp)
448 unsigned int *nbytesp,
449 unsigned int *transfer_idp,
450 unsigned int *flagsp)
451{ 462{
452 struct ath10k_ce_ring *dest_ring = ce_state->dest_ring; 463 struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
453 unsigned int nentries_mask = dest_ring->nentries_mask; 464 unsigned int nentries_mask = dest_ring->nentries_mask;
454 struct ath10k *ar = ce_state->ar;
455 unsigned int sw_index = dest_ring->sw_index; 465 unsigned int sw_index = dest_ring->sw_index;
456 466
457 struct ce_desc *base = dest_ring->base_addr_owner_space; 467 struct ce_desc *base = dest_ring->base_addr_owner_space;
@@ -476,21 +486,17 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
476 desc->nbytes = 0; 486 desc->nbytes = 0;
477 487
478 /* Return data from completed destination descriptor */ 488 /* Return data from completed destination descriptor */
479 *bufferp = __le32_to_cpu(sdesc.addr);
480 *nbytesp = nbytes; 489 *nbytesp = nbytes;
481 *transfer_idp = MS(__le16_to_cpu(sdesc.flags), CE_DESC_FLAGS_META_DATA);
482
483 if (__le16_to_cpu(sdesc.flags) & CE_DESC_FLAGS_BYTE_SWAP)
484 *flagsp = CE_RECV_FLAG_SWAPPED;
485 else
486 *flagsp = 0;
487 490
488 if (per_transfer_contextp) 491 if (per_transfer_contextp)
489 *per_transfer_contextp = 492 *per_transfer_contextp =
490 dest_ring->per_transfer_context[sw_index]; 493 dest_ring->per_transfer_context[sw_index];
491 494
492 /* sanity */ 495 /* Copy engine 5 (HTT Rx) will reuse the same transfer context.
 493	dest_ring->per_transfer_context[sw_index] = NULL;		 496	 * So update transfer context for all CEs except CE5.
497 */
498 if (ce_state->id != 5)
499 dest_ring->per_transfer_context[sw_index] = NULL;
494 500
495 /* Update sw_index */ 501 /* Update sw_index */
496 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); 502 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
@@ -501,10 +507,7 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
501 507
502int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state, 508int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
503 void **per_transfer_contextp, 509 void **per_transfer_contextp,
504 u32 *bufferp, 510 unsigned int *nbytesp)
505 unsigned int *nbytesp,
506 unsigned int *transfer_idp,
507 unsigned int *flagsp)
508{ 511{
509 struct ath10k *ar = ce_state->ar; 512 struct ath10k *ar = ce_state->ar;
510 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 513 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -513,8 +516,7 @@ int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
513 spin_lock_bh(&ar_pci->ce_lock); 516 spin_lock_bh(&ar_pci->ce_lock);
514 ret = ath10k_ce_completed_recv_next_nolock(ce_state, 517 ret = ath10k_ce_completed_recv_next_nolock(ce_state,
515 per_transfer_contextp, 518 per_transfer_contextp,
516 bufferp, nbytesp, 519 nbytesp);
517 transfer_idp, flagsp);
518 spin_unlock_bh(&ar_pci->ce_lock); 520 spin_unlock_bh(&ar_pci->ce_lock);
519 521
520 return ret; 522 return ret;
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index 47b734ce7ecf..25cafcfd6b12 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -22,7 +22,7 @@
22 22
 23/* Maximum number of Copy Engines supported */		 23/* Maximum number of Copy Engines supported */
24#define CE_COUNT_MAX 12 24#define CE_COUNT_MAX 12
25#define CE_HTT_H2T_MSG_SRC_NENTRIES 4096 25#define CE_HTT_H2T_MSG_SRC_NENTRIES 8192
26 26
27/* Descriptor rings must be aligned to this boundary */ 27/* Descriptor rings must be aligned to this boundary */
28#define CE_DESC_RING_ALIGN 8 28#define CE_DESC_RING_ALIGN 8
@@ -166,6 +166,7 @@ int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe);
166int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe); 166int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe);
167int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr); 167int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
168int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr); 168int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
169void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries);
169 170
170/* recv flags */ 171/* recv flags */
171/* Data is byte-swapped */ 172/* Data is byte-swapped */
@@ -177,10 +178,7 @@ int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
177 */ 178 */
178int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state, 179int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
179 void **per_transfer_contextp, 180 void **per_transfer_contextp,
180 u32 *bufferp, 181 unsigned int *nbytesp);
181 unsigned int *nbytesp,
182 unsigned int *transfer_idp,
183 unsigned int *flagsp);
184/* 182/*
185 * Supply data for the next completed unprocessed send descriptor. 183 * Supply data for the next completed unprocessed send descriptor.
186 * Pops 1 completed send buffer from Source ring. 184 * Pops 1 completed send buffer from Source ring.
@@ -212,10 +210,7 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
212 210
213int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state, 211int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
214 void **per_transfer_contextp, 212 void **per_transfer_contextp,
215 u32 *bufferp, 213 unsigned int *nbytesp);
216 unsigned int *nbytesp,
217 unsigned int *transfer_idp,
218 unsigned int *flagsp);
219 214
220/* 215/*
221 * Support clean shutdown by allowing the caller to cancel 216 * Support clean shutdown by allowing the caller to cancel
@@ -416,6 +411,8 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
416 (((int)(toidx)-(int)(fromidx)) & (nentries_mask)) 411 (((int)(toidx)-(int)(fromidx)) & (nentries_mask))
417 412
418#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask)) 413#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
414#define CE_RING_IDX_ADD(nentries_mask, idx, num) \
415 (((idx) + (num)) & (nentries_mask))
419 416
420#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB \ 417#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB \
421 ar->regs->ce_wrap_intr_sum_host_msi_lsb 418 ar->regs->ce_wrap_intr_sum_host_msi_lsb
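The new CE_RING_IDX_ADD macro generalizes CE_RING_IDX_INCR so ath10k_ce_rx_update_write_idx can advance the destination-ring write index by several entries in one batched register write. Both macros rely on the ring size being a power of two, so masking with nentries_mask gives wraparound for free. A standalone illustration of that arithmetic, with an assumed ring size of 512:

/*
 * Ring-index arithmetic behind CE_RING_IDX_INCR/CE_RING_IDX_ADD.
 * Assumes, as the copy-engine rings do, a power-of-two ring size so that
 * "mask = nentries - 1" wraps the index correctly.
 */
#include <assert.h>
#include <stdio.h>

#define RING_IDX_INCR(mask, idx)      (((idx) + 1) & (mask))
#define RING_IDX_ADD(mask, idx, num)  (((idx) + (num)) & (mask))

int main(void)
{
	const unsigned int nentries = 512;	/* power of two */
	const unsigned int mask = nentries - 1;
	unsigned int write_index = 510;

	/* Advancing by 5 wraps past the end of the ring: 510 + 5 = 515 -> 3. */
	write_index = RING_IDX_ADD(mask, write_index, 5);
	assert(write_index == 3);

	/* Single-step increment is just the num == 1 case. */
	assert(RING_IDX_INCR(mask, 511) == 0);

	printf("wrapped write_index = %u\n", write_index);
	return 0;
}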
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index c84c2d30ef1f..b2c7fe3d30a4 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -60,6 +60,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
60 .channel_counters_freq_hz = 88000, 60 .channel_counters_freq_hz = 88000,
61 .max_probe_resp_desc_thres = 0, 61 .max_probe_resp_desc_thres = 0,
62 .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER, 62 .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
63 .cal_data_len = 2116,
63 .fw = { 64 .fw = {
64 .dir = QCA988X_HW_2_0_FW_DIR, 65 .dir = QCA988X_HW_2_0_FW_DIR,
65 .fw = QCA988X_HW_2_0_FW_FILE, 66 .fw = QCA988X_HW_2_0_FW_FILE,
@@ -78,6 +79,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
78 .otp_exe_param = 0, 79 .otp_exe_param = 0,
79 .channel_counters_freq_hz = 88000, 80 .channel_counters_freq_hz = 88000,
80 .max_probe_resp_desc_thres = 0, 81 .max_probe_resp_desc_thres = 0,
82 .cal_data_len = 8124,
81 .fw = { 83 .fw = {
82 .dir = QCA6174_HW_2_1_FW_DIR, 84 .dir = QCA6174_HW_2_1_FW_DIR,
83 .fw = QCA6174_HW_2_1_FW_FILE, 85 .fw = QCA6174_HW_2_1_FW_FILE,
@@ -97,6 +99,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
97 .channel_counters_freq_hz = 88000, 99 .channel_counters_freq_hz = 88000,
98 .max_probe_resp_desc_thres = 0, 100 .max_probe_resp_desc_thres = 0,
99 .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER, 101 .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
102 .cal_data_len = 8124,
100 .fw = { 103 .fw = {
101 .dir = QCA6174_HW_2_1_FW_DIR, 104 .dir = QCA6174_HW_2_1_FW_DIR,
102 .fw = QCA6174_HW_2_1_FW_FILE, 105 .fw = QCA6174_HW_2_1_FW_FILE,
@@ -116,6 +119,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
116 .channel_counters_freq_hz = 88000, 119 .channel_counters_freq_hz = 88000,
117 .max_probe_resp_desc_thres = 0, 120 .max_probe_resp_desc_thres = 0,
118 .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER, 121 .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
122 .cal_data_len = 8124,
119 .fw = { 123 .fw = {
120 .dir = QCA6174_HW_3_0_FW_DIR, 124 .dir = QCA6174_HW_3_0_FW_DIR,
121 .fw = QCA6174_HW_3_0_FW_FILE, 125 .fw = QCA6174_HW_3_0_FW_FILE,
@@ -135,6 +139,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
135 .channel_counters_freq_hz = 88000, 139 .channel_counters_freq_hz = 88000,
136 .max_probe_resp_desc_thres = 0, 140 .max_probe_resp_desc_thres = 0,
137 .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER, 141 .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
142 .cal_data_len = 8124,
138 .fw = { 143 .fw = {
139 /* uses same binaries as hw3.0 */ 144 /* uses same binaries as hw3.0 */
140 .dir = QCA6174_HW_3_0_FW_DIR, 145 .dir = QCA6174_HW_3_0_FW_DIR,
@@ -156,11 +161,10 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
156 .channel_counters_freq_hz = 150000, 161 .channel_counters_freq_hz = 150000,
157 .max_probe_resp_desc_thres = 24, 162 .max_probe_resp_desc_thres = 24,
158 .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE, 163 .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
159 .num_msdu_desc = 1424,
160 .qcache_active_peers = 50,
161 .tx_chain_mask = 0xf, 164 .tx_chain_mask = 0xf,
162 .rx_chain_mask = 0xf, 165 .rx_chain_mask = 0xf,
163 .max_spatial_stream = 4, 166 .max_spatial_stream = 4,
167 .cal_data_len = 12064,
164 .fw = { 168 .fw = {
165 .dir = QCA99X0_HW_2_0_FW_DIR, 169 .dir = QCA99X0_HW_2_0_FW_DIR,
166 .fw = QCA99X0_HW_2_0_FW_FILE, 170 .fw = QCA99X0_HW_2_0_FW_FILE,
@@ -179,6 +183,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
179 .otp_exe_param = 0, 183 .otp_exe_param = 0,
180 .channel_counters_freq_hz = 88000, 184 .channel_counters_freq_hz = 88000,
181 .max_probe_resp_desc_thres = 0, 185 .max_probe_resp_desc_thres = 0,
186 .cal_data_len = 8124,
182 .fw = { 187 .fw = {
183 .dir = QCA9377_HW_1_0_FW_DIR, 188 .dir = QCA9377_HW_1_0_FW_DIR,
184 .fw = QCA9377_HW_1_0_FW_FILE, 189 .fw = QCA9377_HW_1_0_FW_FILE,
@@ -197,6 +202,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
197 .otp_exe_param = 0, 202 .otp_exe_param = 0,
198 .channel_counters_freq_hz = 88000, 203 .channel_counters_freq_hz = 88000,
199 .max_probe_resp_desc_thres = 0, 204 .max_probe_resp_desc_thres = 0,
205 .cal_data_len = 8124,
200 .fw = { 206 .fw = {
201 .dir = QCA9377_HW_1_0_FW_DIR, 207 .dir = QCA9377_HW_1_0_FW_DIR,
202 .fw = QCA9377_HW_1_0_FW_FILE, 208 .fw = QCA9377_HW_1_0_FW_FILE,
@@ -217,11 +223,10 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
217 .channel_counters_freq_hz = 125000, 223 .channel_counters_freq_hz = 125000,
218 .max_probe_resp_desc_thres = 24, 224 .max_probe_resp_desc_thres = 24,
219 .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE, 225 .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
220 .num_msdu_desc = 2500,
221 .qcache_active_peers = 35,
222 .tx_chain_mask = 0x3, 226 .tx_chain_mask = 0x3,
223 .rx_chain_mask = 0x3, 227 .rx_chain_mask = 0x3,
224 .max_spatial_stream = 2, 228 .max_spatial_stream = 2,
229 .cal_data_len = 12064,
225 .fw = { 230 .fw = {
226 .dir = QCA4019_HW_1_0_FW_DIR, 231 .dir = QCA4019_HW_1_0_FW_DIR,
227 .fw = QCA4019_HW_1_0_FW_FILE, 232 .fw = QCA4019_HW_1_0_FW_FILE,
@@ -466,18 +471,18 @@ exit:
466 return ret; 471 return ret;
467} 472}
468 473
469static int ath10k_download_cal_file(struct ath10k *ar) 474static int ath10k_download_cal_file(struct ath10k *ar,
475 const struct firmware *file)
470{ 476{
471 int ret; 477 int ret;
472 478
473 if (!ar->cal_file) 479 if (!file)
474 return -ENOENT; 480 return -ENOENT;
475 481
476 if (IS_ERR(ar->cal_file)) 482 if (IS_ERR(file))
477 return PTR_ERR(ar->cal_file); 483 return PTR_ERR(file);
478 484
479 ret = ath10k_download_board_data(ar, ar->cal_file->data, 485 ret = ath10k_download_board_data(ar, file->data, file->size);
480 ar->cal_file->size);
481 if (ret) { 486 if (ret) {
482 ath10k_err(ar, "failed to download cal_file data: %d\n", ret); 487 ath10k_err(ar, "failed to download cal_file data: %d\n", ret);
483 return ret; 488 return ret;
@@ -488,7 +493,7 @@ static int ath10k_download_cal_file(struct ath10k *ar)
488 return 0; 493 return 0;
489} 494}
490 495
491static int ath10k_download_cal_dt(struct ath10k *ar) 496static int ath10k_download_cal_dt(struct ath10k *ar, const char *dt_name)
492{ 497{
493 struct device_node *node; 498 struct device_node *node;
494 int data_len; 499 int data_len;
@@ -502,13 +507,12 @@ static int ath10k_download_cal_dt(struct ath10k *ar)
502 */ 507 */
503 return -ENOENT; 508 return -ENOENT;
504 509
505 if (!of_get_property(node, "qcom,ath10k-calibration-data", 510 if (!of_get_property(node, dt_name, &data_len)) {
506 &data_len)) {
507 /* The calibration data node is optional */ 511 /* The calibration data node is optional */
508 return -ENOENT; 512 return -ENOENT;
509 } 513 }
510 514
511 if (data_len != QCA988X_CAL_DATA_LEN) { 515 if (data_len != ar->hw_params.cal_data_len) {
512 ath10k_warn(ar, "invalid calibration data length in DT: %d\n", 516 ath10k_warn(ar, "invalid calibration data length in DT: %d\n",
513 data_len); 517 data_len);
514 ret = -EMSGSIZE; 518 ret = -EMSGSIZE;
@@ -521,8 +525,7 @@ static int ath10k_download_cal_dt(struct ath10k *ar)
521 goto out; 525 goto out;
522 } 526 }
523 527
524 ret = of_property_read_u8_array(node, "qcom,ath10k-calibration-data", 528 ret = of_property_read_u8_array(node, dt_name, data, data_len);
525 data, data_len);
526 if (ret) { 529 if (ret) {
527 ath10k_warn(ar, "failed to read calibration data from DT: %d\n", 530 ath10k_warn(ar, "failed to read calibration data from DT: %d\n",
528 ret); 531 ret);
@@ -726,6 +729,14 @@ static int ath10k_fetch_cal_file(struct ath10k *ar)
726{ 729{
727 char filename[100]; 730 char filename[100];
728 731
732 /* pre-cal-<bus>-<id>.bin */
733 scnprintf(filename, sizeof(filename), "pre-cal-%s-%s.bin",
734 ath10k_bus_str(ar->hif.bus), dev_name(ar->dev));
735
736 ar->pre_cal_file = ath10k_fetch_fw_file(ar, ATH10K_FW_DIR, filename);
737 if (!IS_ERR(ar->pre_cal_file))
738 goto success;
739
729 /* cal-<bus>-<id>.bin */ 740 /* cal-<bus>-<id>.bin */
730 scnprintf(filename, sizeof(filename), "cal-%s-%s.bin", 741 scnprintf(filename, sizeof(filename), "cal-%s-%s.bin",
731 ath10k_bus_str(ar->hif.bus), dev_name(ar->dev)); 742 ath10k_bus_str(ar->hif.bus), dev_name(ar->dev));
@@ -734,7 +745,7 @@ static int ath10k_fetch_cal_file(struct ath10k *ar)
734 if (IS_ERR(ar->cal_file)) 745 if (IS_ERR(ar->cal_file))
735 /* calibration file is optional, don't print any warnings */ 746 /* calibration file is optional, don't print any warnings */
736 return PTR_ERR(ar->cal_file); 747 return PTR_ERR(ar->cal_file);
737 748success:
738 ath10k_dbg(ar, ATH10K_DBG_BOOT, "found calibration file %s/%s\n", 749 ath10k_dbg(ar, ATH10K_DBG_BOOT, "found calibration file %s/%s\n",
739 ATH10K_FW_DIR, filename); 750 ATH10K_FW_DIR, filename);
740 751
@@ -1258,11 +1269,77 @@ success:
1258 return 0; 1269 return 0;
1259} 1270}
1260 1271
1272static int ath10k_core_pre_cal_download(struct ath10k *ar)
1273{
1274 int ret;
1275
1276 ret = ath10k_download_cal_file(ar, ar->pre_cal_file);
1277 if (ret == 0) {
1278 ar->cal_mode = ATH10K_PRE_CAL_MODE_FILE;
1279 goto success;
1280 }
1281
1282 ath10k_dbg(ar, ATH10K_DBG_BOOT,
1283 "boot did not find a pre calibration file, try DT next: %d\n",
1284 ret);
1285
1286 ret = ath10k_download_cal_dt(ar, "qcom,ath10k-pre-calibration-data");
1287 if (ret) {
1288 ath10k_dbg(ar, ATH10K_DBG_BOOT,
1289 "unable to load pre cal data from DT: %d\n", ret);
1290 return ret;
1291 }
1292 ar->cal_mode = ATH10K_PRE_CAL_MODE_DT;
1293
1294success:
1295 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using calibration mode %s\n",
1296 ath10k_cal_mode_str(ar->cal_mode));
1297
1298 return 0;
1299}
1300
1301static int ath10k_core_pre_cal_config(struct ath10k *ar)
1302{
1303 int ret;
1304
1305 ret = ath10k_core_pre_cal_download(ar);
1306 if (ret) {
1307 ath10k_dbg(ar, ATH10K_DBG_BOOT,
1308 "failed to load pre cal data: %d\n", ret);
1309 return ret;
1310 }
1311
1312 ret = ath10k_core_get_board_id_from_otp(ar);
1313 if (ret) {
1314 ath10k_err(ar, "failed to get board id: %d\n", ret);
1315 return ret;
1316 }
1317
1318 ret = ath10k_download_and_run_otp(ar);
1319 if (ret) {
1320 ath10k_err(ar, "failed to run otp: %d\n", ret);
1321 return ret;
1322 }
1323
1324 ath10k_dbg(ar, ATH10K_DBG_BOOT,
1325 "pre cal configuration done successfully\n");
1326
1327 return 0;
1328}
1329
1261static int ath10k_download_cal_data(struct ath10k *ar) 1330static int ath10k_download_cal_data(struct ath10k *ar)
1262{ 1331{
1263 int ret; 1332 int ret;
1264 1333
1265 ret = ath10k_download_cal_file(ar); 1334 ret = ath10k_core_pre_cal_config(ar);
1335 if (ret == 0)
1336 return 0;
1337
1338 ath10k_dbg(ar, ATH10K_DBG_BOOT,
1339 "pre cal download procedure failed, try cal file: %d\n",
1340 ret);
1341
1342 ret = ath10k_download_cal_file(ar, ar->cal_file);
1266 if (ret == 0) { 1343 if (ret == 0) {
1267 ar->cal_mode = ATH10K_CAL_MODE_FILE; 1344 ar->cal_mode = ATH10K_CAL_MODE_FILE;
1268 goto done; 1345 goto done;
@@ -1272,7 +1349,7 @@ static int ath10k_download_cal_data(struct ath10k *ar)
1272 "boot did not find a calibration file, try DT next: %d\n", 1349 "boot did not find a calibration file, try DT next: %d\n",
1273 ret); 1350 ret);
1274 1351
1275 ret = ath10k_download_cal_dt(ar); 1352 ret = ath10k_download_cal_dt(ar, "qcom,ath10k-calibration-data");
1276 if (ret == 0) { 1353 if (ret == 0) {
1277 ar->cal_mode = ATH10K_CAL_MODE_DT; 1354 ar->cal_mode = ATH10K_CAL_MODE_DT;
1278 goto done; 1355 goto done;
@@ -1509,7 +1586,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
1509 case ATH10K_FW_WMI_OP_VERSION_10_1: 1586 case ATH10K_FW_WMI_OP_VERSION_10_1:
1510 case ATH10K_FW_WMI_OP_VERSION_10_2: 1587 case ATH10K_FW_WMI_OP_VERSION_10_2:
1511 case ATH10K_FW_WMI_OP_VERSION_10_2_4: 1588 case ATH10K_FW_WMI_OP_VERSION_10_2_4:
1512 if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) { 1589 if (ath10k_peer_stats_enabled(ar)) {
1513 ar->max_num_peers = TARGET_10X_TX_STATS_NUM_PEERS; 1590 ar->max_num_peers = TARGET_10X_TX_STATS_NUM_PEERS;
1514 ar->max_num_stations = TARGET_10X_TX_STATS_NUM_STATIONS; 1591 ar->max_num_stations = TARGET_10X_TX_STATS_NUM_STATIONS;
1515 } else { 1592 } else {
@@ -1538,9 +1615,15 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
1538 ar->num_active_peers = TARGET_10_4_ACTIVE_PEERS; 1615 ar->num_active_peers = TARGET_10_4_ACTIVE_PEERS;
1539 ar->max_num_vdevs = TARGET_10_4_NUM_VDEVS; 1616 ar->max_num_vdevs = TARGET_10_4_NUM_VDEVS;
1540 ar->num_tids = TARGET_10_4_TGT_NUM_TIDS; 1617 ar->num_tids = TARGET_10_4_TGT_NUM_TIDS;
1541 ar->htt.max_num_pending_tx = ar->hw_params.num_msdu_desc; 1618 ar->fw_stats_req_mask = WMI_10_4_STAT_PEER |
1542 ar->fw_stats_req_mask = WMI_STAT_PEER; 1619 WMI_10_4_STAT_PEER_EXTD;
1543 ar->max_spatial_stream = ar->hw_params.max_spatial_stream; 1620 ar->max_spatial_stream = ar->hw_params.max_spatial_stream;
1621
1622 if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
1623 ar->fw_features))
1624 ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC_PFC;
1625 else
1626 ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC;
1544 break; 1627 break;
1545 case ATH10K_FW_WMI_OP_VERSION_UNSET: 1628 case ATH10K_FW_WMI_OP_VERSION_UNSET:
1546 case ATH10K_FW_WMI_OP_VERSION_MAX: 1629 case ATH10K_FW_WMI_OP_VERSION_MAX:
@@ -1578,6 +1661,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
1578int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode) 1661int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
1579{ 1662{
1580 int status; 1663 int status;
1664 u32 val;
1581 1665
1582 lockdep_assert_held(&ar->conf_mutex); 1666 lockdep_assert_held(&ar->conf_mutex);
1583 1667
@@ -1698,6 +1782,21 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
1698 ath10k_dbg(ar, ATH10K_DBG_BOOT, "firmware %s booted\n", 1782 ath10k_dbg(ar, ATH10K_DBG_BOOT, "firmware %s booted\n",
1699 ar->hw->wiphy->fw_version); 1783 ar->hw->wiphy->fw_version);
1700 1784
1785 if (test_bit(WMI_SERVICE_EXT_RES_CFG_SUPPORT, ar->wmi.svc_map)) {
1786 val = 0;
1787 if (ath10k_peer_stats_enabled(ar))
1788 val = WMI_10_4_PEER_STATS;
1789
1790 status = ath10k_wmi_ext_resource_config(ar,
1791 WMI_HOST_PLATFORM_HIGH_PERF, val);
1792 if (status) {
1793 ath10k_err(ar,
1794 "failed to send ext resource cfg command : %d\n",
1795 status);
1796 goto err_hif_stop;
1797 }
1798 }
1799
1701 status = ath10k_wmi_cmd_init(ar); 1800 status = ath10k_wmi_cmd_init(ar);
1702 if (status) { 1801 if (status) {
1703 ath10k_err(ar, "could not send WMI init command (%d)\n", 1802 ath10k_err(ar, "could not send WMI init command (%d)\n",
@@ -1834,11 +1933,20 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
1834 1933
1835 ath10k_debug_print_hwfw_info(ar); 1934 ath10k_debug_print_hwfw_info(ar);
1836 1935
1936 ret = ath10k_core_pre_cal_download(ar);
1937 if (ret) {
1938 /* pre calibration data download is not necessary
1939 * for all the chipsets. Ignore failures and continue.
1940 */
1941 ath10k_dbg(ar, ATH10K_DBG_BOOT,
1942 "could not load pre cal data: %d\n", ret);
1943 }
1944
1837 ret = ath10k_core_get_board_id_from_otp(ar); 1945 ret = ath10k_core_get_board_id_from_otp(ar);
1838 if (ret && ret != -EOPNOTSUPP) { 1946 if (ret && ret != -EOPNOTSUPP) {
1839 ath10k_err(ar, "failed to get board id from otp: %d\n", 1947 ath10k_err(ar, "failed to get board id from otp: %d\n",
1840 ret); 1948 ret);
1841 return ret; 1949 goto err_free_firmware_files;
1842 } 1950 }
1843 1951
1844 ret = ath10k_core_fetch_board_file(ar); 1952 ret = ath10k_core_fetch_board_file(ar);
@@ -2048,7 +2156,9 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
2048 2156
2049 mutex_init(&ar->conf_mutex); 2157 mutex_init(&ar->conf_mutex);
2050 spin_lock_init(&ar->data_lock); 2158 spin_lock_init(&ar->data_lock);
2159 spin_lock_init(&ar->txqs_lock);
2051 2160
2161 INIT_LIST_HEAD(&ar->txqs);
2052 INIT_LIST_HEAD(&ar->peers); 2162 INIT_LIST_HEAD(&ar->peers);
2053 init_waitqueue_head(&ar->peer_mapping_wq); 2163 init_waitqueue_head(&ar->peer_mapping_wq);
2054 init_waitqueue_head(&ar->htt.empty_tx_wq); 2164 init_waitqueue_head(&ar->htt.empty_tx_wq);
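The core.c hunks above teach ath10k_download_cal_file() and ath10k_download_cal_dt() to take the firmware file or DT property as a parameter, so the same helpers now serve a pre-calibration stage ("pre-cal-<bus>-<id>.bin" or "qcom,ath10k-pre-calibration-data") that is tried before the regular calibration sources. The sketch below shows only the resulting fallback order; the loader stubs and mode strings are placeholders, not driver code.

/*
 * Host-side sketch of the calibration fallback order:
 * pre-cal file -> pre-cal DT -> cal file -> cal DT, with OTP as the
 * driver's final fallback.
 */
#include <stdio.h>

#define ENOENT_ERR (-2)

static int load_pre_cal_file(void) { return ENOENT_ERR; } /* stub: not present */
static int load_pre_cal_dt(void)   { return ENOENT_ERR; } /* stub: not present */
static int load_cal_file(void)     { return 0; }          /* stub: succeeds */
static int load_cal_dt(void)       { return ENOENT_ERR; }

static const char *download_cal_data(void)
{
	if (load_pre_cal_file() == 0)
		return "pre-cal-file";
	if (load_pre_cal_dt() == 0)
		return "pre-cal-dt";
	if (load_cal_file() == 0)
		return "file";
	if (load_cal_dt() == 0)
		return "dt";
	return "otp";	/* final fallback in the real driver */
}

int main(void)
{
	printf("calibration mode: %s\n", download_cal_data());
	return 0;
}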
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index a62b62a62266..362bbed8f0e9 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -98,6 +98,7 @@ struct ath10k_skb_cb {
98 u8 eid; 98 u8 eid;
99 u16 msdu_id; 99 u16 msdu_id;
100 struct ieee80211_vif *vif; 100 struct ieee80211_vif *vif;
101 struct ieee80211_txq *txq;
101} __packed; 102} __packed;
102 103
103struct ath10k_skb_rxcb { 104struct ath10k_skb_rxcb {
@@ -297,6 +298,9 @@ struct ath10k_dfs_stats {
297 298
298struct ath10k_peer { 299struct ath10k_peer {
299 struct list_head list; 300 struct list_head list;
301 struct ieee80211_vif *vif;
302 struct ieee80211_sta *sta;
303
300 int vdev_id; 304 int vdev_id;
301 u8 addr[ETH_ALEN]; 305 u8 addr[ETH_ALEN];
302 DECLARE_BITMAP(peer_ids, ATH10K_MAX_NUM_PEER_IDS); 306 DECLARE_BITMAP(peer_ids, ATH10K_MAX_NUM_PEER_IDS);
@@ -305,6 +309,12 @@ struct ath10k_peer {
305 struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1]; 309 struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
306}; 310};
307 311
312struct ath10k_txq {
313 struct list_head list;
314 unsigned long num_fw_queued;
315 unsigned long num_push_allowed;
316};
317
308struct ath10k_sta { 318struct ath10k_sta {
309 struct ath10k_vif *arvif; 319 struct ath10k_vif *arvif;
310 320
@@ -313,6 +323,7 @@ struct ath10k_sta {
313 u32 bw; 323 u32 bw;
314 u32 nss; 324 u32 nss;
315 u32 smps; 325 u32 smps;
326 u16 peer_id;
316 327
317 struct work_struct update_wk; 328 struct work_struct update_wk;
318 329
@@ -335,6 +346,7 @@ struct ath10k_vif {
335 struct list_head list; 346 struct list_head list;
336 347
337 u32 vdev_id; 348 u32 vdev_id;
349 u16 peer_id;
338 enum wmi_vdev_type vdev_type; 350 enum wmi_vdev_type vdev_type;
339 enum wmi_vdev_subtype vdev_subtype; 351 enum wmi_vdev_subtype vdev_subtype;
340 u32 beacon_interval; 352 u32 beacon_interval;
@@ -549,12 +561,17 @@ enum ath10k_dev_flags {
549 561
 550	/* Bluetooth coexistence enabled */			 562	/* Bluetooth coexistence enabled */
551 ATH10K_FLAG_BTCOEX, 563 ATH10K_FLAG_BTCOEX,
564
565 /* Per Station statistics service */
566 ATH10K_FLAG_PEER_STATS,
552}; 567};
553 568
554enum ath10k_cal_mode { 569enum ath10k_cal_mode {
555 ATH10K_CAL_MODE_FILE, 570 ATH10K_CAL_MODE_FILE,
556 ATH10K_CAL_MODE_OTP, 571 ATH10K_CAL_MODE_OTP,
557 ATH10K_CAL_MODE_DT, 572 ATH10K_CAL_MODE_DT,
573 ATH10K_PRE_CAL_MODE_FILE,
574 ATH10K_PRE_CAL_MODE_DT,
558}; 575};
559 576
560enum ath10k_crypt_mode { 577enum ath10k_crypt_mode {
@@ -573,6 +590,10 @@ static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode)
573 return "otp"; 590 return "otp";
574 case ATH10K_CAL_MODE_DT: 591 case ATH10K_CAL_MODE_DT:
575 return "dt"; 592 return "dt";
593 case ATH10K_PRE_CAL_MODE_FILE:
594 return "pre-cal-file";
595 case ATH10K_PRE_CAL_MODE_DT:
596 return "pre-cal-dt";
576 } 597 }
577 598
578 return "unknown"; 599 return "unknown";
@@ -680,11 +701,10 @@ struct ath10k {
680 /* The padding bytes's location is different on various chips */ 701 /* The padding bytes's location is different on various chips */
681 enum ath10k_hw_4addr_pad hw_4addr_pad; 702 enum ath10k_hw_4addr_pad hw_4addr_pad;
682 703
683 u32 num_msdu_desc;
684 u32 qcache_active_peers;
685 u32 tx_chain_mask; 704 u32 tx_chain_mask;
686 u32 rx_chain_mask; 705 u32 rx_chain_mask;
687 u32 max_spatial_stream; 706 u32 max_spatial_stream;
707 u32 cal_data_len;
688 708
689 struct ath10k_hw_params_fw { 709 struct ath10k_hw_params_fw {
690 const char *dir; 710 const char *dir;
@@ -708,6 +728,7 @@ struct ath10k {
708 const void *firmware_data; 728 const void *firmware_data;
709 size_t firmware_len; 729 size_t firmware_len;
710 730
731 const struct firmware *pre_cal_file;
711 const struct firmware *cal_file; 732 const struct firmware *cal_file;
712 733
713 struct { 734 struct {
@@ -744,7 +765,7 @@ struct ath10k {
744 } scan; 765 } scan;
745 766
746 struct { 767 struct {
747 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS]; 768 struct ieee80211_supported_band sbands[NUM_NL80211_BANDS];
748 } mac; 769 } mac;
749 770
750 /* should never be NULL; needed for regular htt rx */ 771 /* should never be NULL; needed for regular htt rx */
@@ -756,6 +777,9 @@ struct ath10k {
756 /* current operating channel definition */ 777 /* current operating channel definition */
757 struct cfg80211_chan_def chandef; 778 struct cfg80211_chan_def chandef;
758 779
780 /* currently configured operating channel in firmware */
781 struct ieee80211_channel *tgt_oper_chan;
782
759 unsigned long long free_vdev_map; 783 unsigned long long free_vdev_map;
760 struct ath10k_vif *monitor_arvif; 784 struct ath10k_vif *monitor_arvif;
761 bool monitor; 785 bool monitor;
@@ -786,9 +810,13 @@ struct ath10k {
786 810
787 /* protects shared structure data */ 811 /* protects shared structure data */
788 spinlock_t data_lock; 812 spinlock_t data_lock;
813 /* protects: ar->txqs, artxq->list */
814 spinlock_t txqs_lock;
789 815
816 struct list_head txqs;
790 struct list_head arvifs; 817 struct list_head arvifs;
791 struct list_head peers; 818 struct list_head peers;
819 struct ath10k_peer *peer_map[ATH10K_MAX_NUM_PEER_IDS];
792 wait_queue_head_t peer_mapping_wq; 820 wait_queue_head_t peer_mapping_wq;
793 821
794 /* protected by conf_mutex */ 822 /* protected by conf_mutex */
@@ -876,6 +904,15 @@ struct ath10k {
876 u8 drv_priv[0] __aligned(sizeof(void *)); 904 u8 drv_priv[0] __aligned(sizeof(void *));
877}; 905};
878 906
907static inline bool ath10k_peer_stats_enabled(struct ath10k *ar)
908{
909 if (test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags) &&
910 test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
911 return true;
912
913 return false;
914}
915
879struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, 916struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
880 enum ath10k_bus bus, 917 enum ath10k_bus bus,
881 enum ath10k_hw_rev hw_rev, 918 enum ath10k_hw_rev hw_rev,
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 076d29b53ddf..76bbe17b25b6 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -127,6 +127,7 @@ EXPORT_SYMBOL(ath10k_info);
127void ath10k_debug_print_hwfw_info(struct ath10k *ar) 127void ath10k_debug_print_hwfw_info(struct ath10k *ar)
128{ 128{
129 char fw_features[128] = {}; 129 char fw_features[128] = {};
130 u32 crc = 0;
130 131
131 ath10k_core_get_fw_features_str(ar, fw_features, sizeof(fw_features)); 132 ath10k_core_get_fw_features_str(ar, fw_features, sizeof(fw_features));
132 133
@@ -143,11 +144,14 @@ void ath10k_debug_print_hwfw_info(struct ath10k *ar)
143 config_enabled(CONFIG_ATH10K_DFS_CERTIFIED), 144 config_enabled(CONFIG_ATH10K_DFS_CERTIFIED),
144 config_enabled(CONFIG_NL80211_TESTMODE)); 145 config_enabled(CONFIG_NL80211_TESTMODE));
145 146
147 if (ar->firmware)
148 crc = crc32_le(0, ar->firmware->data, ar->firmware->size);
149
146 ath10k_info(ar, "firmware ver %s api %d features %s crc32 %08x\n", 150 ath10k_info(ar, "firmware ver %s api %d features %s crc32 %08x\n",
147 ar->hw->wiphy->fw_version, 151 ar->hw->wiphy->fw_version,
148 ar->fw_api, 152 ar->fw_api,
149 fw_features, 153 fw_features,
150 crc32_le(0, ar->firmware->data, ar->firmware->size)); 154 crc);
151} 155}
152 156
153void ath10k_debug_print_board_info(struct ath10k *ar) 157void ath10k_debug_print_board_info(struct ath10k *ar)
@@ -319,7 +323,7 @@ static void ath10k_debug_fw_stats_reset(struct ath10k *ar)
319void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb) 323void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
320{ 324{
321 struct ath10k_fw_stats stats = {}; 325 struct ath10k_fw_stats stats = {};
322 bool is_start, is_started, is_end, peer_stats_svc; 326 bool is_start, is_started, is_end;
323 size_t num_peers; 327 size_t num_peers;
324 size_t num_vdevs; 328 size_t num_vdevs;
325 int ret; 329 int ret;
@@ -346,13 +350,11 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
346 * b) consume stat update events until another one with pdev stats is 350 * b) consume stat update events until another one with pdev stats is
347 * delivered which is treated as end-of-data and is itself discarded 351 * delivered which is treated as end-of-data and is itself discarded
348 */ 352 */
349 353 if (ath10k_peer_stats_enabled(ar))
350 peer_stats_svc = test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map);
351 if (peer_stats_svc)
352 ath10k_sta_update_rx_duration(ar, &stats.peers); 354 ath10k_sta_update_rx_duration(ar, &stats.peers);
353 355
354 if (ar->debug.fw_stats_done) { 356 if (ar->debug.fw_stats_done) {
355 if (!peer_stats_svc) 357 if (!ath10k_peer_stats_enabled(ar))
356 ath10k_warn(ar, "received unsolicited stats update event\n"); 358 ath10k_warn(ar, "received unsolicited stats update event\n");
357 359
358 goto free; 360 goto free;
@@ -1447,7 +1449,7 @@ static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file)
1447 goto err; 1449 goto err;
1448 } 1450 }
1449 1451
1450 buf = vmalloc(QCA988X_CAL_DATA_LEN); 1452 buf = vmalloc(ar->hw_params.cal_data_len);
1451 if (!buf) { 1453 if (!buf) {
1452 ret = -ENOMEM; 1454 ret = -ENOMEM;
1453 goto err; 1455 goto err;
@@ -1462,7 +1464,7 @@ static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file)
1462 } 1464 }
1463 1465
1464 ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), buf, 1466 ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), buf,
1465 QCA988X_CAL_DATA_LEN); 1467 ar->hw_params.cal_data_len);
1466 if (ret) { 1468 if (ret) {
1467 ath10k_warn(ar, "failed to read calibration data: %d\n", ret); 1469 ath10k_warn(ar, "failed to read calibration data: %d\n", ret);
1468 goto err_vfree; 1470 goto err_vfree;
@@ -1487,10 +1489,11 @@ static ssize_t ath10k_debug_cal_data_read(struct file *file,
1487 char __user *user_buf, 1489 char __user *user_buf,
1488 size_t count, loff_t *ppos) 1490 size_t count, loff_t *ppos)
1489{ 1491{
1492 struct ath10k *ar = file->private_data;
1490 void *buf = file->private_data; 1493 void *buf = file->private_data;
1491 1494
1492 return simple_read_from_buffer(user_buf, count, ppos, 1495 return simple_read_from_buffer(user_buf, count, ppos,
1493 buf, QCA988X_CAL_DATA_LEN); 1496 buf, ar->hw_params.cal_data_len);
1494} 1497}
1495 1498
1496static int ath10k_debug_cal_data_release(struct inode *inode, 1499static int ath10k_debug_cal_data_release(struct inode *inode,
@@ -2019,7 +2022,12 @@ static ssize_t ath10k_write_pktlog_filter(struct file *file,
2019 goto out; 2022 goto out;
2020 } 2023 }
2021 2024
2022 if (filter && (filter != ar->debug.pktlog_filter)) { 2025 if (filter == ar->debug.pktlog_filter) {
2026 ret = count;
2027 goto out;
2028 }
2029
2030 if (filter) {
2023 ret = ath10k_wmi_pdev_pktlog_enable(ar, filter); 2031 ret = ath10k_wmi_pdev_pktlog_enable(ar, filter);
2024 if (ret) { 2032 if (ret) {
2025 ath10k_warn(ar, "failed to enable pktlog filter %x: %d\n", 2033 ath10k_warn(ar, "failed to enable pktlog filter %x: %d\n",
@@ -2174,6 +2182,73 @@ static const struct file_operations fops_btcoex = {
2174 .open = simple_open 2182 .open = simple_open
2175}; 2183};
2176 2184
2185static ssize_t ath10k_write_peer_stats(struct file *file,
2186 const char __user *ubuf,
2187 size_t count, loff_t *ppos)
2188{
2189 struct ath10k *ar = file->private_data;
2190 char buf[32];
2191 size_t buf_size;
2192 int ret = 0;
2193 bool val;
2194
2195 buf_size = min(count, (sizeof(buf) - 1));
2196 if (copy_from_user(buf, ubuf, buf_size))
2197 return -EFAULT;
2198
2199 buf[buf_size] = '\0';
2200
2201 if (strtobool(buf, &val) != 0)
2202 return -EINVAL;
2203
2204 mutex_lock(&ar->conf_mutex);
2205
2206 if (ar->state != ATH10K_STATE_ON &&
2207 ar->state != ATH10K_STATE_RESTARTED) {
2208 ret = -ENETDOWN;
2209 goto exit;
2210 }
2211
2212 if (!(test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags) ^ val))
2213 goto exit;
2214
2215 if (val)
2216 set_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags);
2217 else
2218 clear_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags);
2219
2220 ath10k_info(ar, "restarting firmware due to Peer stats change");
2221
2222 queue_work(ar->workqueue, &ar->restart_work);
2223 ret = count;
2224
2225exit:
2226 mutex_unlock(&ar->conf_mutex);
2227 return ret;
2228}
2229
2230static ssize_t ath10k_read_peer_stats(struct file *file, char __user *ubuf,
2231 size_t count, loff_t *ppos)
2232
2233{
2234 char buf[32];
2235 struct ath10k *ar = file->private_data;
2236 int len = 0;
2237
2238 mutex_lock(&ar->conf_mutex);
2239 len = scnprintf(buf, sizeof(buf) - len, "%d\n",
2240 test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags));
2241 mutex_unlock(&ar->conf_mutex);
2242
2243 return simple_read_from_buffer(ubuf, count, ppos, buf, len);
2244}
2245
2246static const struct file_operations fops_peer_stats = {
2247 .read = ath10k_read_peer_stats,
2248 .write = ath10k_write_peer_stats,
2249 .open = simple_open
2250};
2251
2177static ssize_t ath10k_debug_fw_checksums_read(struct file *file, 2252static ssize_t ath10k_debug_fw_checksums_read(struct file *file,
2178 char __user *user_buf, 2253 char __user *user_buf,
2179 size_t count, loff_t *ppos) 2254 size_t count, loff_t *ppos)
@@ -2337,6 +2412,11 @@ int ath10k_debug_register(struct ath10k *ar)
2337 debugfs_create_file("btcoex", S_IRUGO | S_IWUSR, 2412 debugfs_create_file("btcoex", S_IRUGO | S_IWUSR,
2338 ar->debug.debugfs_phy, ar, &fops_btcoex); 2413 ar->debug.debugfs_phy, ar, &fops_btcoex);
2339 2414
2415 if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
2416 debugfs_create_file("peer_stats", S_IRUGO | S_IWUSR,
2417 ar->debug.debugfs_phy, ar,
2418 &fops_peer_stats);
2419
2340 debugfs_create_file("fw_checksums", S_IRUSR, 2420 debugfs_create_file("fw_checksums", S_IRUSR,
2341 ar->debug.debugfs_phy, ar, &fops_fw_checksums); 2421 ar->debug.debugfs_phy, ar, &fops_fw_checksums);
2342 2422
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 7561f22f10f9..17a3008d9ab1 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -149,7 +149,7 @@ int ath10k_htt_connect(struct ath10k_htt *htt)
149 memset(&conn_resp, 0, sizeof(conn_resp)); 149 memset(&conn_resp, 0, sizeof(conn_resp));
150 150
151 conn_req.ep_ops.ep_tx_complete = ath10k_htt_htc_tx_complete; 151 conn_req.ep_ops.ep_tx_complete = ath10k_htt_htc_tx_complete;
152 conn_req.ep_ops.ep_rx_complete = ath10k_htt_t2h_msg_handler; 152 conn_req.ep_ops.ep_rx_complete = ath10k_htt_htc_t2h_msg_handler;
153 153
154 /* connect to control service */ 154 /* connect to control service */
155 conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_DATA_MSG; 155 conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_DATA_MSG;
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 13391ea4422d..60bd9fe4b2d9 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -22,6 +22,7 @@
22#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23#include <linux/dmapool.h> 23#include <linux/dmapool.h>
24#include <linux/hashtable.h> 24#include <linux/hashtable.h>
25#include <linux/kfifo.h>
25#include <net/mac80211.h> 26#include <net/mac80211.h>
26 27
27#include "htc.h" 28#include "htc.h"
@@ -1461,6 +1462,14 @@ struct htt_tx_mode_switch_ind {
1461 struct htt_tx_mode_switch_record records[0]; 1462 struct htt_tx_mode_switch_record records[0];
1462} __packed; 1463} __packed;
1463 1464
1465struct htt_channel_change {
1466 u8 pad[3];
1467 __le32 freq;
1468 __le32 center_freq1;
1469 __le32 center_freq2;
1470 __le32 phymode;
1471} __packed;
1472
1464union htt_rx_pn_t { 1473union htt_rx_pn_t {
1465 /* WEP: 24-bit PN */ 1474 /* WEP: 24-bit PN */
1466 u32 pn24; 1475 u32 pn24;
@@ -1511,16 +1520,22 @@ struct htt_resp {
1511 struct htt_tx_fetch_ind tx_fetch_ind; 1520 struct htt_tx_fetch_ind tx_fetch_ind;
1512 struct htt_tx_fetch_confirm tx_fetch_confirm; 1521 struct htt_tx_fetch_confirm tx_fetch_confirm;
1513 struct htt_tx_mode_switch_ind tx_mode_switch_ind; 1522 struct htt_tx_mode_switch_ind tx_mode_switch_ind;
1523 struct htt_channel_change chan_change;
1514 }; 1524 };
1515} __packed; 1525} __packed;
1516 1526
1517/*** host side structures follow ***/ 1527/*** host side structures follow ***/
1518 1528
1519struct htt_tx_done { 1529struct htt_tx_done {
1520 u32 msdu_id; 1530 u16 msdu_id;
1521 bool discard; 1531 u16 status;
1522 bool no_ack; 1532};
1523 bool success; 1533
1534enum htt_tx_compl_state {
1535 HTT_TX_COMPL_STATE_NONE,
1536 HTT_TX_COMPL_STATE_ACK,
1537 HTT_TX_COMPL_STATE_NOACK,
1538 HTT_TX_COMPL_STATE_DISCARD,
1524}; 1539};
1525 1540
1526struct htt_peer_map_event { 1541struct htt_peer_map_event {
@@ -1641,17 +1656,20 @@ struct ath10k_htt {
1641 struct idr pending_tx; 1656 struct idr pending_tx;
1642 wait_queue_head_t empty_tx_wq; 1657 wait_queue_head_t empty_tx_wq;
1643 1658
1659 /* FIFO for storing tx done status {ack, no-ack, discard} and msdu id */
1660 DECLARE_KFIFO_PTR(txdone_fifo, struct htt_tx_done);
1661
1644 /* set if host-fw communication goes haywire 1662 /* set if host-fw communication goes haywire
1645 * used to avoid further failures */ 1663 * used to avoid further failures */
1646 bool rx_confused; 1664 bool rx_confused;
1647 struct tasklet_struct rx_replenish_task; 1665 atomic_t num_mpdus_ready;
1648 1666
1649 /* This is used to group tx/rx completions separately and process them 1667 /* This is used to group tx/rx completions separately and process them
1650 * in batches to reduce cache stalls */ 1668 * in batches to reduce cache stalls */
1651 struct tasklet_struct txrx_compl_task; 1669 struct tasklet_struct txrx_compl_task;
1652 struct sk_buff_head tx_compl_q;
1653 struct sk_buff_head rx_compl_q; 1670 struct sk_buff_head rx_compl_q;
1654 struct sk_buff_head rx_in_ord_compl_q; 1671 struct sk_buff_head rx_in_ord_compl_q;
1672 struct sk_buff_head tx_fetch_ind_q;
1655 1673
1656 /* rx_status template */ 1674 /* rx_status template */
1657 struct ieee80211_rx_status rx_status; 1675 struct ieee80211_rx_status rx_status;
@@ -1667,10 +1685,13 @@ struct ath10k_htt {
1667 } txbuf; 1685 } txbuf;
1668 1686
1669 struct { 1687 struct {
1688 bool enabled;
1670 struct htt_q_state *vaddr; 1689 struct htt_q_state *vaddr;
1671 dma_addr_t paddr; 1690 dma_addr_t paddr;
1691 u16 num_push_allowed;
1672 u16 num_peers; 1692 u16 num_peers;
1673 u16 num_tids; 1693 u16 num_tids;
1694 enum htt_tx_mode_switch_mode mode;
1674 enum htt_q_depth_type type; 1695 enum htt_q_depth_type type;
1675 } tx_q_state; 1696 } tx_q_state;
1676}; 1697};
@@ -1715,7 +1736,7 @@ struct htt_rx_desc {
1715 1736
1716/* Refill a bunch of RX buffers for each refill round so that FW/HW can handle 1737/* Refill a bunch of RX buffers for each refill round so that FW/HW can handle
1717 * aggregated traffic more nicely. */ 1738 * aggregated traffic more nicely. */
1718#define ATH10K_HTT_MAX_NUM_REFILL 16 1739#define ATH10K_HTT_MAX_NUM_REFILL 100
1719 1740
1720/* 1741/*
1721 * DMA_MAP expects the buffer to be an integral number of cache lines. 1742 * DMA_MAP expects the buffer to be an integral number of cache lines.
@@ -1743,7 +1764,8 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar);
1743void ath10k_htt_rx_free(struct ath10k_htt *htt); 1764void ath10k_htt_rx_free(struct ath10k_htt *htt);
1744 1765
1745void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb); 1766void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
1746void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb); 1767void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
1768bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
1747int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt); 1769int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
1748int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie); 1770int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie);
1749int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt); 1771int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt);
@@ -1752,8 +1774,23 @@ int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
1752 u8 max_subfrms_ampdu, 1774 u8 max_subfrms_ampdu,
1753 u8 max_subfrms_amsdu); 1775 u8 max_subfrms_amsdu);
1754void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb); 1776void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb);
1777int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
1778 __le32 token,
1779 __le16 fetch_seq_num,
1780 struct htt_tx_fetch_record *records,
1781 size_t num_records);
1782
1783void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
1784 struct ieee80211_txq *txq);
1785void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
1786 struct ieee80211_txq *txq);
1787void ath10k_htt_tx_txq_sync(struct ath10k *ar);
1788void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
1789int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt);
1790void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt);
1791int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
1792 bool is_presp);
1755 1793
1756void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool limit_mgmt_desc);
1757int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb); 1794int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
1758void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id); 1795void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
1759int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *); 1796int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *);
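In htt.h the tx-completion record shrinks from three bools to a packed {u16 msdu_id; u16 status} pair classified by htt_tx_compl_state, so completions can be queued compactly on the new txdone_fifo kfifo. The userspace mimic below shows the shape of that data; the array ring stands in for the kernel kfifo and the identifiers are local copies, not driver symbols.

/*
 * Mimic of the reworked tx-completion record queued on a FIFO.
 */
#include <stdint.h>
#include <stdio.h>

enum tx_compl_state {	/* mirrors htt_tx_compl_state in the diff */
	TX_COMPL_STATE_NONE,
	TX_COMPL_STATE_ACK,
	TX_COMPL_STATE_NOACK,
	TX_COMPL_STATE_DISCARD,
};

struct tx_done {
	uint16_t msdu_id;
	uint16_t status;
};

#define FIFO_SIZE 8	/* power of two, so masking wraps the indices */

static struct tx_done fifo[FIFO_SIZE];
static unsigned int head, tail;

static int fifo_put(struct tx_done d)
{
	if (head - tail == FIFO_SIZE)
		return -1;			/* full */
	fifo[head++ & (FIFO_SIZE - 1)] = d;
	return 0;
}

static int fifo_get(struct tx_done *d)
{
	if (head == tail)
		return -1;			/* empty */
	*d = fifo[tail++ & (FIFO_SIZE - 1)];
	return 0;
}

int main(void)
{
	struct tx_done d;

	fifo_put((struct tx_done){ .msdu_id = 42, .status = TX_COMPL_STATE_ACK });
	while (fifo_get(&d) == 0)
		printf("msdu %u completed with state %u\n", d.msdu_id, d.status);
	return 0;
}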
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index ae9b686a4e91..079fef5b7ef2 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -31,6 +31,8 @@
31/* when under memory pressure rx ring refill may fail and needs a retry */ 31/* when under memory pressure rx ring refill may fail and needs a retry */
32#define HTT_RX_RING_REFILL_RETRY_MS 50 32#define HTT_RX_RING_REFILL_RETRY_MS 50
33 33
34#define HTT_RX_RING_REFILL_RESCHED_MS 5
35
34static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb); 36static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
35static void ath10k_htt_txrx_compl_task(unsigned long ptr); 37static void ath10k_htt_txrx_compl_task(unsigned long ptr);
36 38
@@ -192,7 +194,8 @@ static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
192 mod_timer(&htt->rx_ring.refill_retry_timer, jiffies + 194 mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
193 msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS)); 195 msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
194 } else if (num_deficit > 0) { 196 } else if (num_deficit > 0) {
195 tasklet_schedule(&htt->rx_replenish_task); 197 mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
198 msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
196 } 199 }
197 spin_unlock_bh(&htt->rx_ring.lock); 200 spin_unlock_bh(&htt->rx_ring.lock);
198} 201}
@@ -223,12 +226,11 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar)
223void ath10k_htt_rx_free(struct ath10k_htt *htt) 226void ath10k_htt_rx_free(struct ath10k_htt *htt)
224{ 227{
225 del_timer_sync(&htt->rx_ring.refill_retry_timer); 228 del_timer_sync(&htt->rx_ring.refill_retry_timer);
226 tasklet_kill(&htt->rx_replenish_task);
227 tasklet_kill(&htt->txrx_compl_task); 229 tasklet_kill(&htt->txrx_compl_task);
228 230
229 skb_queue_purge(&htt->tx_compl_q);
230 skb_queue_purge(&htt->rx_compl_q); 231 skb_queue_purge(&htt->rx_compl_q);
231 skb_queue_purge(&htt->rx_in_ord_compl_q); 232 skb_queue_purge(&htt->rx_in_ord_compl_q);
233 skb_queue_purge(&htt->tx_fetch_ind_q);
232 234
233 ath10k_htt_rx_ring_free(htt); 235 ath10k_htt_rx_ring_free(htt);
234 236
@@ -281,7 +283,6 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
281 283
282/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */ 284/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
283static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, 285static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
284 u8 **fw_desc, int *fw_desc_len,
285 struct sk_buff_head *amsdu) 286 struct sk_buff_head *amsdu)
286{ 287{
287 struct ath10k *ar = htt->ar; 288 struct ath10k *ar = htt->ar;
@@ -323,48 +324,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
323 return -EIO; 324 return -EIO;
324 } 325 }
325 326
326 /*
327 * Copy the FW rx descriptor for this MSDU from the rx
328 * indication message into the MSDU's netbuf. HL uses the
329 * same rx indication message definition as LL, and simply
330 * appends new info (fields from the HW rx desc, and the
331 * MSDU payload itself). So, the offset into the rx
332 * indication message only has to account for the standard
333 * offset of the per-MSDU FW rx desc info within the
334 * message, and how many bytes of the per-MSDU FW rx desc
335 * info have already been consumed. (And the endianness of
336 * the host, since for a big-endian host, the rx ind
337 * message contents, including the per-MSDU rx desc bytes,
338 * were byteswapped during upload.)
339 */
340 if (*fw_desc_len > 0) {
341 rx_desc->fw_desc.info0 = **fw_desc;
342 /*
343 * The target is expected to only provide the basic
344 * per-MSDU rx descriptors. Just to be sure, verify
345 * that the target has not attached extension data
346 * (e.g. LRO flow ID).
347 */
348
349 /* or more, if there's extension data */
350 (*fw_desc)++;
351 (*fw_desc_len)--;
352 } else {
353 /*
354 * When an oversized AMSDU happened, FW will lost
355 * some of MSDU status - in this case, the FW
356 * descriptors provided will be less than the
357 * actual MSDUs inside this MPDU. Mark the FW
358 * descriptors so that it will still deliver to
359 * upper stack, if no CRC error for this MPDU.
360 *
361 * FIX THIS - the FW descriptors are actually for
362 * MSDUs in the end of this A-MSDU instead of the
363 * beginning.
364 */
365 rx_desc->fw_desc.info0 = 0;
366 }
367
368 msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags) 327 msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
369 & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR | 328 & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
370 RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR)); 329 RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
@@ -423,13 +382,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
423 return msdu_chaining; 382 return msdu_chaining;
424} 383}
425 384
426static void ath10k_htt_rx_replenish_task(unsigned long ptr)
427{
428 struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
429
430 ath10k_htt_rx_msdu_buff_replenish(htt);
431}
432
433static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt, 385static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
434 u32 paddr) 386 u32 paddr)
435{ 387{
@@ -563,12 +515,10 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
563 htt->rx_ring.sw_rd_idx.msdu_payld = 0; 515 htt->rx_ring.sw_rd_idx.msdu_payld = 0;
564 hash_init(htt->rx_ring.skb_table); 516 hash_init(htt->rx_ring.skb_table);
565 517
566 tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
567 (unsigned long)htt);
568
569 skb_queue_head_init(&htt->tx_compl_q);
570 skb_queue_head_init(&htt->rx_compl_q); 518 skb_queue_head_init(&htt->rx_compl_q);
571 skb_queue_head_init(&htt->rx_in_ord_compl_q); 519 skb_queue_head_init(&htt->rx_in_ord_compl_q);
520 skb_queue_head_init(&htt->tx_fetch_ind_q);
521 atomic_set(&htt->num_mpdus_ready, 0);
572 522
573 tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task, 523 tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
574 (unsigned long)htt); 524 (unsigned long)htt);
@@ -860,6 +810,8 @@ static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
860 ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id); 810 ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
861 if (!ch) 811 if (!ch)
862 ch = ath10k_htt_rx_h_any_channel(ar); 812 ch = ath10k_htt_rx_h_any_channel(ar);
813 if (!ch)
814 ch = ar->tgt_oper_chan;
863 spin_unlock_bh(&ar->data_lock); 815 spin_unlock_bh(&ar->data_lock);
864 816
865 if (!ch) 817 if (!ch)
@@ -979,7 +931,7 @@ static void ath10k_process_rx(struct ath10k *ar,
979 *status = *rx_status; 931 *status = *rx_status;
980 932
981 ath10k_dbg(ar, ATH10K_DBG_DATA, 933 ath10k_dbg(ar, ATH10K_DBG_DATA,
982 "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", 934 "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%llx fcs-err %i mic-err %i amsdu-more %i\n",
983 skb, 935 skb,
984 skb->len, 936 skb->len,
985 ieee80211_get_SA(hdr), 937 ieee80211_get_SA(hdr),
@@ -1076,20 +1028,25 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
1076 hdr = (void *)msdu->data; 1028 hdr = (void *)msdu->data;
1077 1029
1078 /* Tail */ 1030 /* Tail */
1079 skb_trim(msdu, msdu->len - ath10k_htt_rx_crypto_tail_len(ar, enctype)); 1031 if (status->flag & RX_FLAG_IV_STRIPPED)
1032 skb_trim(msdu, msdu->len -
1033 ath10k_htt_rx_crypto_tail_len(ar, enctype));
1080 1034
1081 /* MMIC */ 1035 /* MMIC */
1082 if (!ieee80211_has_morefrags(hdr->frame_control) && 1036 if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
1037 !ieee80211_has_morefrags(hdr->frame_control) &&
1083 enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA) 1038 enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
1084 skb_trim(msdu, msdu->len - 8); 1039 skb_trim(msdu, msdu->len - 8);
1085 1040
1086 /* Head */ 1041 /* Head */
1087 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1042 if (status->flag & RX_FLAG_IV_STRIPPED) {
1088 crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype); 1043 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1044 crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
1089 1045
1090 memmove((void *)msdu->data + crypto_len, 1046 memmove((void *)msdu->data + crypto_len,
1091 (void *)msdu->data, hdr_len); 1047 (void *)msdu->data, hdr_len);
1092 skb_pull(msdu, crypto_len); 1048 skb_pull(msdu, crypto_len);
1049 }
1093} 1050}
1094 1051
1095static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar, 1052static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
@@ -1343,6 +1300,7 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
1343 bool has_tkip_err; 1300 bool has_tkip_err;
1344 bool has_peer_idx_invalid; 1301 bool has_peer_idx_invalid;
1345 bool is_decrypted; 1302 bool is_decrypted;
1303 bool is_mgmt;
1346 u32 attention; 1304 u32 attention;
1347 1305
1348 if (skb_queue_empty(amsdu)) 1306 if (skb_queue_empty(amsdu))
@@ -1351,6 +1309,9 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
1351 first = skb_peek(amsdu); 1309 first = skb_peek(amsdu);
1352 rxd = (void *)first->data - sizeof(*rxd); 1310 rxd = (void *)first->data - sizeof(*rxd);
1353 1311
1312 is_mgmt = !!(rxd->attention.flags &
1313 __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
1314
1354 enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0), 1315 enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
1355 RX_MPDU_START_INFO0_ENCRYPT_TYPE); 1316 RX_MPDU_START_INFO0_ENCRYPT_TYPE);
1356 1317
@@ -1392,6 +1353,7 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
1392 RX_FLAG_MMIC_ERROR | 1353 RX_FLAG_MMIC_ERROR |
1393 RX_FLAG_DECRYPTED | 1354 RX_FLAG_DECRYPTED |
1394 RX_FLAG_IV_STRIPPED | 1355 RX_FLAG_IV_STRIPPED |
1356 RX_FLAG_ONLY_MONITOR |
1395 RX_FLAG_MMIC_STRIPPED); 1357 RX_FLAG_MMIC_STRIPPED);
1396 1358
1397 if (has_fcs_err) 1359 if (has_fcs_err)
@@ -1400,10 +1362,21 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
1400 if (has_tkip_err) 1362 if (has_tkip_err)
1401 status->flag |= RX_FLAG_MMIC_ERROR; 1363 status->flag |= RX_FLAG_MMIC_ERROR;
1402 1364
1403 if (is_decrypted) 1365 /* Firmware reports all necessary management frames via WMI already.
1404 status->flag |= RX_FLAG_DECRYPTED | 1366 * They are not reported to monitor interfaces at all so pass the ones
1405 RX_FLAG_IV_STRIPPED | 1367 * coming via HTT to monitor interfaces instead. This simplifies
1406 RX_FLAG_MMIC_STRIPPED; 1368 * matters a lot.
1369 */
1370 if (is_mgmt)
1371 status->flag |= RX_FLAG_ONLY_MONITOR;
1372
1373 if (is_decrypted) {
1374 status->flag |= RX_FLAG_DECRYPTED;
1375
1376 if (likely(!is_mgmt))
1377 status->flag |= RX_FLAG_IV_STRIPPED |
1378 RX_FLAG_MMIC_STRIPPED;
1379}
1407 1380
1408 skb_queue_walk(amsdu, msdu) { 1381 skb_queue_walk(amsdu, msdu) {
1409 ath10k_htt_rx_h_csum_offload(msdu); 1382 ath10k_htt_rx_h_csum_offload(msdu);
@@ -1416,6 +1389,8 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
1416 */ 1389 */
1417 if (!is_decrypted) 1390 if (!is_decrypted)
1418 continue; 1391 continue;
1392 if (is_mgmt)
1393 continue;
1419 1394
1420 hdr = (void *)msdu->data; 1395 hdr = (void *)msdu->data;
1421 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); 1396 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
@@ -1516,14 +1491,6 @@ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
1516 struct sk_buff_head *amsdu, 1491 struct sk_buff_head *amsdu,
1517 struct ieee80211_rx_status *rx_status) 1492 struct ieee80211_rx_status *rx_status)
1518{ 1493{
1519 struct sk_buff *msdu;
1520 struct htt_rx_desc *rxd;
1521 bool is_mgmt;
1522 bool has_fcs_err;
1523
1524 msdu = skb_peek(amsdu);
1525 rxd = (void *)msdu->data - sizeof(*rxd);
1526
1527 /* FIXME: It might be a good idea to do some fuzzy-testing to drop 1494 /* FIXME: It might be a good idea to do some fuzzy-testing to drop
1528 * invalid/dangerous frames. 1495 * invalid/dangerous frames.
1529 */ 1496 */
@@ -1533,23 +1500,6 @@ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
1533 return false; 1500 return false;
1534 } 1501 }
1535 1502
1536 is_mgmt = !!(rxd->attention.flags &
1537 __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
1538 has_fcs_err = !!(rxd->attention.flags &
1539 __cpu_to_le32(RX_ATTENTION_FLAGS_FCS_ERR));
1540
1541 /* Management frames are handled via WMI events. The pros of such
1542 * approach is that channel is explicitly provided in WMI events
1543 * whereas HTT doesn't provide channel information for Rxed frames.
1544 *
1545 * However some firmware revisions don't report corrupted frames via
1546 * WMI so don't drop them.
1547 */
1548 if (is_mgmt && !has_fcs_err) {
1549 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
1550 return false;
1551 }
1552
1553 if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) { 1503 if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
1554 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n"); 1504 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
1555 return false; 1505 return false;
@@ -1571,25 +1521,49 @@ static void ath10k_htt_rx_h_filter(struct ath10k *ar,
1571 __skb_queue_purge(amsdu); 1521 __skb_queue_purge(amsdu);
1572} 1522}
1573 1523
1574static void ath10k_htt_rx_handler(struct ath10k_htt *htt, 1524static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
1575 struct htt_rx_indication *rx)
1576{ 1525{
1577 struct ath10k *ar = htt->ar; 1526 struct ath10k *ar = htt->ar;
1578 struct ieee80211_rx_status *rx_status = &htt->rx_status; 1527 static struct ieee80211_rx_status rx_status;
1579 struct htt_rx_indication_mpdu_range *mpdu_ranges;
1580 struct sk_buff_head amsdu; 1528 struct sk_buff_head amsdu;
1581 int num_mpdu_ranges; 1529 int ret;
1582 int fw_desc_len;
1583 u8 *fw_desc;
1584 int i, ret, mpdu_count = 0;
1585 1530
1586 lockdep_assert_held(&htt->rx_ring.lock); 1531 __skb_queue_head_init(&amsdu);
1587 1532
1588 if (htt->rx_confused) 1533 spin_lock_bh(&htt->rx_ring.lock);
1589 return; 1534 if (htt->rx_confused) {
1535 spin_unlock_bh(&htt->rx_ring.lock);
1536 return -EIO;
1537 }
1538 ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
1539 spin_unlock_bh(&htt->rx_ring.lock);
1590 1540
1591 fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes); 1541 if (ret < 0) {
1592 fw_desc = (u8 *)&rx->fw_desc; 1542 ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
1543 __skb_queue_purge(&amsdu);
1544 /* FIXME: It's probably a good idea to reboot the
1545 * device instead of leaving it inoperable.
1546 */
1547 htt->rx_confused = true;
1548 return ret;
1549 }
1550
1551 ath10k_htt_rx_h_ppdu(ar, &amsdu, &rx_status, 0xffff);
1552 ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
1553 ath10k_htt_rx_h_filter(ar, &amsdu, &rx_status);
1554 ath10k_htt_rx_h_mpdu(ar, &amsdu, &rx_status);
1555 ath10k_htt_rx_h_deliver(ar, &amsdu, &rx_status);
1556
1557 return 0;
1558}
1559
1560static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
1561 struct htt_rx_indication *rx)
1562{
1563 struct ath10k *ar = htt->ar;
1564 struct htt_rx_indication_mpdu_range *mpdu_ranges;
1565 int num_mpdu_ranges;
1566 int i, mpdu_count = 0;
1593 1567
1594 num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1), 1568 num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
1595 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); 1569 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
@@ -1603,80 +1577,19 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
1603 for (i = 0; i < num_mpdu_ranges; i++) 1577 for (i = 0; i < num_mpdu_ranges; i++)
1604 mpdu_count += mpdu_ranges[i].mpdu_count; 1578 mpdu_count += mpdu_ranges[i].mpdu_count;
1605 1579
1606 while (mpdu_count--) { 1580 atomic_add(mpdu_count, &htt->num_mpdus_ready);
1607 __skb_queue_head_init(&amsdu);
1608 ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc,
1609 &fw_desc_len, &amsdu);
1610 if (ret < 0) {
1611 ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
1612 __skb_queue_purge(&amsdu);
1613 /* FIXME: It's probably a good idea to reboot the
1614 * device instead of leaving it inoperable.
1615 */
1616 htt->rx_confused = true;
1617 break;
1618 }
1619 1581
1620 ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff); 1582 tasklet_schedule(&htt->txrx_compl_task);
1621 ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
1622 ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
1623 ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
1624 ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
1625 }
1626
1627 tasklet_schedule(&htt->rx_replenish_task);
1628} 1583}
1629 1584
1630static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt, 1585static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt)
1631 struct htt_rx_fragment_indication *frag)
1632{ 1586{
1633 struct ath10k *ar = htt->ar; 1587 atomic_inc(&htt->num_mpdus_ready);
1634 struct ieee80211_rx_status *rx_status = &htt->rx_status;
1635 struct sk_buff_head amsdu;
1636 int ret;
1637 u8 *fw_desc;
1638 int fw_desc_len;
1639
1640 fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
1641 fw_desc = (u8 *)frag->fw_msdu_rx_desc;
1642
1643 __skb_queue_head_init(&amsdu);
1644 1588
1645 spin_lock_bh(&htt->rx_ring.lock); 1589 tasklet_schedule(&htt->txrx_compl_task);
1646 ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
1647 &amsdu);
1648 spin_unlock_bh(&htt->rx_ring.lock);
1649
1650 tasklet_schedule(&htt->rx_replenish_task);
1651
1652 ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
1653
1654 if (ret) {
1655 ath10k_warn(ar, "failed to pop amsdu from httr rx ring for fragmented rx %d\n",
1656 ret);
1657 __skb_queue_purge(&amsdu);
1658 return;
1659 }
1660
1661 if (skb_queue_len(&amsdu) != 1) {
1662 ath10k_warn(ar, "failed to pop frag amsdu: too many msdus\n");
1663 __skb_queue_purge(&amsdu);
1664 return;
1665 }
1666
1667 ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
1668 ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
1669 ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
1670 ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
1671
1672 if (fw_desc_len > 0) {
1673 ath10k_dbg(ar, ATH10K_DBG_HTT,
1674 "expecting more fragmented rx in one indication %d\n",
1675 fw_desc_len);
1676 }
1677} 1590}
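The rewritten rx-indication and fragment handlers above no longer pop frames from the rx ring directly; they only account for how many MPDUs are pending and kick the tasklet. A condensed sketch of the two halves of that pattern, using names from this diff but an assumed, simplified shape (the real consumer is ath10k_htt_txrx_compl_task further below):

	/* producer side: runs from the HTT indication path */
	static void rx_ind_producer(struct ath10k_htt *htt, int mpdu_count)
	{
		atomic_add(mpdu_count, &htt->num_mpdus_ready);
		tasklet_schedule(&htt->txrx_compl_task);
	}

	/* consumer side: the tasklet drains the counter one A-MSDU at a time */
	static void rx_ind_consumer(struct ath10k_htt *htt)
	{
		while (atomic_read(&htt->num_mpdus_ready)) {
			if (ath10k_htt_rx_handle_amsdu(htt))
				break;
			atomic_dec(&htt->num_mpdus_ready);
		}
	}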
1678 1591
1679static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar, 1592static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
1680 struct sk_buff *skb) 1593 struct sk_buff *skb)
1681{ 1594{
1682 struct ath10k_htt *htt = &ar->htt; 1595 struct ath10k_htt *htt = &ar->htt;
@@ -1688,19 +1601,19 @@ static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
1688 1601
1689 switch (status) { 1602 switch (status) {
1690 case HTT_DATA_TX_STATUS_NO_ACK: 1603 case HTT_DATA_TX_STATUS_NO_ACK:
1691 tx_done.no_ack = true; 1604 tx_done.status = HTT_TX_COMPL_STATE_NOACK;
1692 break; 1605 break;
1693 case HTT_DATA_TX_STATUS_OK: 1606 case HTT_DATA_TX_STATUS_OK:
1694 tx_done.success = true; 1607 tx_done.status = HTT_TX_COMPL_STATE_ACK;
1695 break; 1608 break;
1696 case HTT_DATA_TX_STATUS_DISCARD: 1609 case HTT_DATA_TX_STATUS_DISCARD:
1697 case HTT_DATA_TX_STATUS_POSTPONE: 1610 case HTT_DATA_TX_STATUS_POSTPONE:
1698 case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL: 1611 case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
1699 tx_done.discard = true; 1612 tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
1700 break; 1613 break;
1701 default: 1614 default:
1702 ath10k_warn(ar, "unhandled tx completion status %d\n", status); 1615 ath10k_warn(ar, "unhandled tx completion status %d\n", status);
1703 tx_done.discard = true; 1616 tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
1704 break; 1617 break;
1705 } 1618 }
1706 1619
@@ -1710,7 +1623,20 @@ static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
1710 for (i = 0; i < resp->data_tx_completion.num_msdus; i++) { 1623 for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
1711 msdu_id = resp->data_tx_completion.msdus[i]; 1624 msdu_id = resp->data_tx_completion.msdus[i];
1712 tx_done.msdu_id = __le16_to_cpu(msdu_id); 1625 tx_done.msdu_id = __le16_to_cpu(msdu_id);
1713 ath10k_txrx_tx_unref(htt, &tx_done); 1626
1627 /* kfifo_put: In practice firmware shouldn't fire off per-CE
1628 * interrupt and main interrupt (MSI/-X range case) for the same
1629 * HTC service so it should be safe to use kfifo_put w/o lock.
1630 *
1631 * From kfifo_put() documentation:
1632 * Note that with only one concurrent reader and one concurrent
1633 * writer, you don't need extra locking to use these macros.
1634 */
1635 if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
1636 ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
1637 tx_done.msdu_id, tx_done.status);
1638 ath10k_txrx_tx_unref(htt, &tx_done);
1639 }
1714 } 1640 }
1715} 1641}
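The kfifo_put() comment above relies on a strict single-producer/single-consumer arrangement: this completion handler is the only writer and the txrx tasklet further below is the only reader of txdone_fifo. A minimal self-contained sketch of that lockless pattern, with an assumed entry type standing in for struct htt_tx_done:

	#include <linux/kfifo.h>
	#include <linux/printk.h>
	#include <linux/types.h>

	struct txdone_entry {		/* assumed stand-in for struct htt_tx_done */
		u16 msdu_id;
		u8 status;
	};

	static DECLARE_KFIFO(txdone_fifo, struct txdone_entry, 256);

	/* single producer: completion indication path */
	static void txdone_push(struct txdone_entry e)
	{
		if (!kfifo_put(&txdone_fifo, e))	/* returns 0 when the fifo is full */
			pr_warn("txdone fifo overrun, msdu_id %u\n", e.msdu_id);
	}

	/* single consumer: called only from the tasklet, so reader and writer never race */
	static void txdone_drain(void)
	{
		struct txdone_entry e;

		while (kfifo_get(&txdone_fifo, &e))
			; /* complete e.msdu_id here */
	}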
1716 1642
@@ -1978,11 +1904,324 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
1978 return; 1904 return;
1979 } 1905 }
1980 } 1906 }
1907 ath10k_htt_rx_msdu_buff_replenish(htt);
1908}
1909
1910static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
1911 const __le32 *resp_ids,
1912 int num_resp_ids)
1913{
1914 int i;
1915 u32 resp_id;
1916
1917 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
1918 num_resp_ids);
1919
1920 for (i = 0; i < num_resp_ids; i++) {
1921 resp_id = le32_to_cpu(resp_ids[i]);
1922
1923 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
1924 resp_id);
1925
1926 /* TODO: free resp_id */
1927 }
1928}
1929
1930static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
1931{
1932 struct ieee80211_hw *hw = ar->hw;
1933 struct ieee80211_txq *txq;
1934 struct htt_resp *resp = (struct htt_resp *)skb->data;
1935 struct htt_tx_fetch_record *record;
1936 size_t len;
1937 size_t max_num_bytes;
1938 size_t max_num_msdus;
1939 size_t num_bytes;
1940 size_t num_msdus;
1941 const __le32 *resp_ids;
1942 u16 num_records;
1943 u16 num_resp_ids;
1944 u16 peer_id;
1945 u8 tid;
1946 int ret;
1947 int i;
1948
1949 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");
1950
1951 len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
1952 if (unlikely(skb->len < len)) {
1953 ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
1954 return;
1955 }
1956
1957 num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
1958 num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);
1959
1960 len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
1961 len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;
1962
1963 if (unlikely(skb->len < len)) {
1964 ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
1965 return;
1966 }
1967
1968 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n",
1969 num_records, num_resp_ids,
1970 le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));
1971
1972 if (!ar->htt.tx_q_state.enabled) {
1973 ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
1974 return;
1975 }
1976
1977 if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
1978 ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
1979 return;
1980 }
1981
1982 rcu_read_lock();
1983
1984 for (i = 0; i < num_records; i++) {
1985 record = &resp->tx_fetch_ind.records[i];
1986 peer_id = MS(le16_to_cpu(record->info),
1987 HTT_TX_FETCH_RECORD_INFO_PEER_ID);
1988 tid = MS(le16_to_cpu(record->info),
1989 HTT_TX_FETCH_RECORD_INFO_TID);
1990 max_num_msdus = le16_to_cpu(record->num_msdus);
1991 max_num_bytes = le32_to_cpu(record->num_bytes);
1992
1993 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n",
1994 i, peer_id, tid, max_num_msdus, max_num_bytes);
1995
1996 if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
1997 unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
1998 ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
1999 peer_id, tid);
2000 continue;
2001 }
2002
2003 spin_lock_bh(&ar->data_lock);
2004 txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
2005 spin_unlock_bh(&ar->data_lock);
2006
2007 /* It is okay to release the lock and use txq because RCU read
2008 * lock is held.
2009 */
2010
2011 if (unlikely(!txq)) {
2012 ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
2013 peer_id, tid);
2014 continue;
2015 }
2016
2017 num_msdus = 0;
2018 num_bytes = 0;
2019
2020 while (num_msdus < max_num_msdus &&
2021 num_bytes < max_num_bytes) {
2022 ret = ath10k_mac_tx_push_txq(hw, txq);
2023 if (ret < 0)
2024 break;
2025
2026 num_msdus++;
2027 num_bytes += ret;
2028 }
2029
2030 record->num_msdus = cpu_to_le16(num_msdus);
2031 record->num_bytes = cpu_to_le32(num_bytes);
2032
2033 ath10k_htt_tx_txq_recalc(hw, txq);
2034 }
2035
2036 rcu_read_unlock();
2037
2038 resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
2039 ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);
2040
2041 ret = ath10k_htt_tx_fetch_resp(ar,
2042 resp->tx_fetch_ind.token,
2043 resp->tx_fetch_ind.fetch_seq_num,
2044 resp->tx_fetch_ind.records,
2045 num_records);
2046 if (unlikely(ret)) {
2047 ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
2048 le32_to_cpu(resp->tx_fetch_ind.token), ret);
2049 /* FIXME: request fw restart */
2050 }
1981 2051
1982 tasklet_schedule(&htt->rx_replenish_task); 2052 ath10k_htt_tx_txq_sync(ar);
1983} 2053}
1984 2054
1985void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) 2055static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
2056 struct sk_buff *skb)
2057{
2058 const struct htt_resp *resp = (void *)skb->data;
2059 size_t len;
2060 int num_resp_ids;
2061
2062 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");
2063
2064 len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
2065 if (unlikely(skb->len < len)) {
2066 ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
2067 return;
2068 }
2069
2070 num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
2071 len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;
2072
2073 if (unlikely(skb->len < len)) {
2074 ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
2075 return;
2076 }
2077
2078 ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
2079 resp->tx_fetch_confirm.resp_ids,
2080 num_resp_ids);
2081}
2082
2083static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
2084 struct sk_buff *skb)
2085{
2086 const struct htt_resp *resp = (void *)skb->data;
2087 const struct htt_tx_mode_switch_record *record;
2088 struct ieee80211_txq *txq;
2089 struct ath10k_txq *artxq;
2090 size_t len;
2091 size_t num_records;
2092 enum htt_tx_mode_switch_mode mode;
2093 bool enable;
2094 u16 info0;
2095 u16 info1;
2096 u16 threshold;
2097 u16 peer_id;
2098 u8 tid;
2099 int i;
2100
2101 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");
2102
2103 len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
2104 if (unlikely(skb->len < len)) {
2105 ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
2106 return;
2107 }
2108
2109 info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
2110 info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);
2111
2112 enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
2113 num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
2114 mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
2115 threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);
2116
2117 ath10k_dbg(ar, ATH10K_DBG_HTT,
2118 "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n",
2119 info0, info1, enable, num_records, mode, threshold);
2120
2121 len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;
2122
2123 if (unlikely(skb->len < len)) {
2124 ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n");
2125 return;
2126 }
2127
2128 switch (mode) {
2129 case HTT_TX_MODE_SWITCH_PUSH:
2130 case HTT_TX_MODE_SWITCH_PUSH_PULL:
2131 break;
2132 default:
2133 ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n",
2134 mode);
2135 return;
2136 }
2137
2138 if (!enable)
2139 return;
2140
2141 ar->htt.tx_q_state.enabled = enable;
2142 ar->htt.tx_q_state.mode = mode;
2143 ar->htt.tx_q_state.num_push_allowed = threshold;
2144
2145 rcu_read_lock();
2146
2147 for (i = 0; i < num_records; i++) {
2148 record = &resp->tx_mode_switch_ind.records[i];
2149 info0 = le16_to_cpu(record->info0);
2150 peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
2151 tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);
2152
2153 if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
2154 unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
2155 ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
2156 peer_id, tid);
2157 continue;
2158 }
2159
2160 spin_lock_bh(&ar->data_lock);
2161 txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
2162 spin_unlock_bh(&ar->data_lock);
2163
2164 /* It is okay to release the lock and use txq because RCU read
2165 * lock is held.
2166 */
2167
2168 if (unlikely(!txq)) {
2169 ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
2170 peer_id, tid);
2171 continue;
2172 }
2173
2174 spin_lock_bh(&ar->htt.tx_lock);
2175 artxq = (void *)txq->drv_priv;
2176 artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
2177 spin_unlock_bh(&ar->htt.tx_lock);
2178 }
2179
2180 rcu_read_unlock();
2181
2182 ath10k_mac_tx_push_pending(ar);
2183}
2184
2185static inline enum nl80211_band phy_mode_to_band(u32 phy_mode)
2186{
2187 enum nl80211_band band;
2188
2189 switch (phy_mode) {
2190 case MODE_11A:
2191 case MODE_11NA_HT20:
2192 case MODE_11NA_HT40:
2193 case MODE_11AC_VHT20:
2194 case MODE_11AC_VHT40:
2195 case MODE_11AC_VHT80:
2196 band = NL80211_BAND_5GHZ;
2197 break;
2198 case MODE_11G:
2199 case MODE_11B:
2200 case MODE_11GONLY:
2201 case MODE_11NG_HT20:
2202 case MODE_11NG_HT40:
2203 case MODE_11AC_VHT20_2G:
2204 case MODE_11AC_VHT40_2G:
2205 case MODE_11AC_VHT80_2G:
2206 default:
2207 band = NL80211_BAND_2GHZ;
2208 }
2209
2210 return band;
2211}
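A hypothetical usage sketch (the call sites for phy_mode_to_band() sit outside this hunk): the band derived from the target phy mode can be combined with a firmware-reported channel number to recover the ieee80211_channel, for example:

	/* illustrative only; example_rx_chan_lookup() is not part of this patch */
	static struct ieee80211_channel *
	example_rx_chan_lookup(struct ath10k *ar, u32 phy_mode, u32 channel_num)
	{
		enum nl80211_band band = phy_mode_to_band(phy_mode);
		int freq = ieee80211_channel_to_frequency(channel_num, band);

		return ieee80211_get_channel(ar->hw->wiphy, freq);
	}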
2212
2213void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
2214{
2215 bool release;
2216
2217 release = ath10k_htt_t2h_msg_handler(ar, skb);
2218
2219 /* Free the indication buffer */
2220 if (release)
2221 dev_kfree_skb_any(skb);
2222}
2223
2224bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
1986{ 2225{
1987 struct ath10k_htt *htt = &ar->htt; 2226 struct ath10k_htt *htt = &ar->htt;
1988 struct htt_resp *resp = (struct htt_resp *)skb->data; 2227 struct htt_resp *resp = (struct htt_resp *)skb->data;
@@ -1998,8 +2237,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
1998 if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) { 2237 if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
1999 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X", 2238 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X",
2000 resp->hdr.msg_type, ar->htt.t2h_msg_types_max); 2239 resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
2001 dev_kfree_skb_any(skb); 2240 return true;
2002 return;
2003 } 2241 }
2004 type = ar->htt.t2h_msg_types[resp->hdr.msg_type]; 2242 type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
2005 2243
@@ -2011,9 +2249,8 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
2011 break; 2249 break;
2012 } 2250 }
2013 case HTT_T2H_MSG_TYPE_RX_IND: 2251 case HTT_T2H_MSG_TYPE_RX_IND:
2014 skb_queue_tail(&htt->rx_compl_q, skb); 2252 ath10k_htt_rx_proc_rx_ind(htt, &resp->rx_ind);
2015 tasklet_schedule(&htt->txrx_compl_task); 2253 break;
2016 return;
2017 case HTT_T2H_MSG_TYPE_PEER_MAP: { 2254 case HTT_T2H_MSG_TYPE_PEER_MAP: {
2018 struct htt_peer_map_event ev = { 2255 struct htt_peer_map_event ev = {
2019 .vdev_id = resp->peer_map.vdev_id, 2256 .vdev_id = resp->peer_map.vdev_id,
@@ -2034,28 +2271,33 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
2034 struct htt_tx_done tx_done = {}; 2271 struct htt_tx_done tx_done = {};
2035 int status = __le32_to_cpu(resp->mgmt_tx_completion.status); 2272 int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
2036 2273
2037 tx_done.msdu_id = 2274 tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
2038 __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
2039 2275
2040 switch (status) { 2276 switch (status) {
2041 case HTT_MGMT_TX_STATUS_OK: 2277 case HTT_MGMT_TX_STATUS_OK:
2042 tx_done.success = true; 2278 tx_done.status = HTT_TX_COMPL_STATE_ACK;
2043 break; 2279 break;
2044 case HTT_MGMT_TX_STATUS_RETRY: 2280 case HTT_MGMT_TX_STATUS_RETRY:
2045 tx_done.no_ack = true; 2281 tx_done.status = HTT_TX_COMPL_STATE_NOACK;
2046 break; 2282 break;
2047 case HTT_MGMT_TX_STATUS_DROP: 2283 case HTT_MGMT_TX_STATUS_DROP:
2048 tx_done.discard = true; 2284 tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
2049 break; 2285 break;
2050 } 2286 }
2051 2287
2052 ath10k_txrx_tx_unref(htt, &tx_done); 2288 status = ath10k_txrx_tx_unref(htt, &tx_done);
2289 if (!status) {
2290 spin_lock_bh(&htt->tx_lock);
2291 ath10k_htt_tx_mgmt_dec_pending(htt);
2292 spin_unlock_bh(&htt->tx_lock);
2293 }
2294 ath10k_mac_tx_push_pending(ar);
2053 break; 2295 break;
2054 } 2296 }
2055 case HTT_T2H_MSG_TYPE_TX_COMPL_IND: 2297 case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
2056 skb_queue_tail(&htt->tx_compl_q, skb); 2298 ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
2057 tasklet_schedule(&htt->txrx_compl_task); 2299 tasklet_schedule(&htt->txrx_compl_task);
2058 return; 2300 break;
2059 case HTT_T2H_MSG_TYPE_SEC_IND: { 2301 case HTT_T2H_MSG_TYPE_SEC_IND: {
2060 struct ath10k *ar = htt->ar; 2302 struct ath10k *ar = htt->ar;
2061 struct htt_security_indication *ev = &resp->security_indication; 2303 struct htt_security_indication *ev = &resp->security_indication;
@@ -2071,7 +2313,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
2071 case HTT_T2H_MSG_TYPE_RX_FRAG_IND: { 2313 case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
2072 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ", 2314 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
2073 skb->data, skb->len); 2315 skb->data, skb->len);
2074 ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind); 2316 ath10k_htt_rx_frag_handler(htt);
2075 break; 2317 break;
2076 } 2318 }
2077 case HTT_T2H_MSG_TYPE_TEST: 2319 case HTT_T2H_MSG_TYPE_TEST:
@@ -2111,18 +2353,39 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
2111 case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: { 2353 case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
2112 skb_queue_tail(&htt->rx_in_ord_compl_q, skb); 2354 skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
2113 tasklet_schedule(&htt->txrx_compl_task); 2355 tasklet_schedule(&htt->txrx_compl_task);
2114 return; 2356 return false;
2115 } 2357 }
2116 case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND: 2358 case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
2117 break; 2359 break;
2118 case HTT_T2H_MSG_TYPE_CHAN_CHANGE: 2360 case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
2361 u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
2362 u32 freq = __le32_to_cpu(resp->chan_change.freq);
2363
2364 ar->tgt_oper_chan =
2365 __ieee80211_get_channel(ar->hw->wiphy, freq);
2366 ath10k_dbg(ar, ATH10K_DBG_HTT,
2367 "htt chan change freq %u phymode %s\n",
2368 freq, ath10k_wmi_phymode_str(phymode));
2119 break; 2369 break;
2370 }
2120 case HTT_T2H_MSG_TYPE_AGGR_CONF: 2371 case HTT_T2H_MSG_TYPE_AGGR_CONF:
2121 break; 2372 break;
2122 case HTT_T2H_MSG_TYPE_TX_FETCH_IND: 2373 case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
2374 struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);
2375
2376 if (!tx_fetch_ind) {
2377 ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
2378 break;
2379 }
2380 skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
2381 tasklet_schedule(&htt->txrx_compl_task);
2382 break;
2383 }
2123 case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM: 2384 case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
2385 ath10k_htt_rx_tx_fetch_confirm(ar, skb);
2386 break;
2124 case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND: 2387 case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
2125 /* TODO: Implement pull-push logic */ 2388 ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
2126 break; 2389 break;
2127 case HTT_T2H_MSG_TYPE_EN_STATS: 2390 case HTT_T2H_MSG_TYPE_EN_STATS:
2128 default: 2391 default:
@@ -2132,9 +2395,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
2132 skb->data, skb->len); 2395 skb->data, skb->len);
2133 break; 2396 break;
2134 }; 2397 };
2135 2398 return true;
2136 /* Free the indication buffer */
2137 dev_kfree_skb_any(skb);
2138} 2399}
2139EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler); 2400EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
2140 2401
@@ -2150,40 +2411,47 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr)
2150{ 2411{
2151 struct ath10k_htt *htt = (struct ath10k_htt *)ptr; 2412 struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
2152 struct ath10k *ar = htt->ar; 2413 struct ath10k *ar = htt->ar;
2153 struct sk_buff_head tx_q; 2414 struct htt_tx_done tx_done = {};
2154 struct sk_buff_head rx_q;
2155 struct sk_buff_head rx_ind_q; 2415 struct sk_buff_head rx_ind_q;
2156 struct htt_resp *resp; 2416 struct sk_buff_head tx_ind_q;
2157 struct sk_buff *skb; 2417 struct sk_buff *skb;
2158 unsigned long flags; 2418 unsigned long flags;
2419 int num_mpdus;
2159 2420
2160 __skb_queue_head_init(&tx_q);
2161 __skb_queue_head_init(&rx_q);
2162 __skb_queue_head_init(&rx_ind_q); 2421 __skb_queue_head_init(&rx_ind_q);
2163 2422 __skb_queue_head_init(&tx_ind_q);
2164 spin_lock_irqsave(&htt->tx_compl_q.lock, flags);
2165 skb_queue_splice_init(&htt->tx_compl_q, &tx_q);
2166 spin_unlock_irqrestore(&htt->tx_compl_q.lock, flags);
2167
2168 spin_lock_irqsave(&htt->rx_compl_q.lock, flags);
2169 skb_queue_splice_init(&htt->rx_compl_q, &rx_q);
2170 spin_unlock_irqrestore(&htt->rx_compl_q.lock, flags);
2171 2423
2172 spin_lock_irqsave(&htt->rx_in_ord_compl_q.lock, flags); 2424 spin_lock_irqsave(&htt->rx_in_ord_compl_q.lock, flags);
2173 skb_queue_splice_init(&htt->rx_in_ord_compl_q, &rx_ind_q); 2425 skb_queue_splice_init(&htt->rx_in_ord_compl_q, &rx_ind_q);
2174 spin_unlock_irqrestore(&htt->rx_in_ord_compl_q.lock, flags); 2426 spin_unlock_irqrestore(&htt->rx_in_ord_compl_q.lock, flags);
2175 2427
2176 while ((skb = __skb_dequeue(&tx_q))) { 2428 spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
2177 ath10k_htt_rx_frm_tx_compl(htt->ar, skb); 2429 skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
2430 spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);
2431
2432 /* kfifo_get: called only within txrx_tasklet so it's neatly serialized.
2433 * From kfifo_get() documentation:
2434 * Note that with only one concurrent reader and one concurrent writer,
2435 * you don't need extra locking to use these macros.
2436 */
2437 while (kfifo_get(&htt->txdone_fifo, &tx_done))
2438 ath10k_txrx_tx_unref(htt, &tx_done);
2439
2440 while ((skb = __skb_dequeue(&tx_ind_q))) {
2441 ath10k_htt_rx_tx_fetch_ind(ar, skb);
2178 dev_kfree_skb_any(skb); 2442 dev_kfree_skb_any(skb);
2179 } 2443 }
2180 2444
2181 while ((skb = __skb_dequeue(&rx_q))) { 2445 ath10k_mac_tx_push_pending(ar);
2182 resp = (struct htt_resp *)skb->data; 2446
2183 spin_lock_bh(&htt->rx_ring.lock); 2447 num_mpdus = atomic_read(&htt->num_mpdus_ready);
2184 ath10k_htt_rx_handler(htt, &resp->rx_ind); 2448
2185 spin_unlock_bh(&htt->rx_ring.lock); 2449 while (num_mpdus) {
2186 dev_kfree_skb_any(skb); 2450 if (ath10k_htt_rx_handle_amsdu(htt))
2451 break;
2452
2453 num_mpdus--;
2454 atomic_dec(&htt->num_mpdus_ready);
2187 } 2455 }
2188 2456
2189 while ((skb = __skb_dequeue(&rx_ind_q))) { 2457 while ((skb = __skb_dequeue(&rx_ind_q))) {
@@ -2192,4 +2460,6 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr)
2192 spin_unlock_bh(&htt->rx_ring.lock); 2460 spin_unlock_bh(&htt->rx_ring.lock);
2193 dev_kfree_skb_any(skb); 2461 dev_kfree_skb_any(skb);
2194 } 2462 }
2463
2464 ath10k_htt_rx_msdu_buff_replenish(htt);
2195} 2465}
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 95acb727c068..9baa2e677f8a 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -22,53 +22,183 @@
22#include "txrx.h" 22#include "txrx.h"
23#include "debug.h" 23#include "debug.h"
24 24
25void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool limit_mgmt_desc) 25static u8 ath10k_htt_tx_txq_calc_size(size_t count)
26{ 26{
27 if (limit_mgmt_desc) 27 int exp;
28 htt->num_pending_mgmt_tx--; 28 int factor;
29
30 exp = 0;
31 factor = count >> 7;
32
33 while (factor >= 64 && exp < 4) {
34 factor >>= 3;
35 exp++;
36 }
37
38 if (exp == 4)
39 return 0xff;
40
41 if (count > 0)
42 factor = max(1, factor);
43
44 return SM(exp, HTT_TX_Q_STATE_ENTRY_EXP) |
45 SM(factor, HTT_TX_Q_STATE_ENTRY_FACTOR);
46}
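The helper above packs an approximate byte count into one byte as factor * 128 * 8^exp, saturating to 0xff. For example, 20000 bytes gives an initial factor of 156, which normalizes to factor 19 with exp 1 and decodes back to 19456 bytes. A hypothetical inverse, assuming the same HTT_TX_Q_STATE_ENTRY_* fields are read back with the driver's MS() macro:

	/* hypothetical decode helper, for illustration only */
	static size_t ath10k_htt_tx_txq_decode_size(u8 value)
	{
		u8 exp = MS(value, HTT_TX_Q_STATE_ENTRY_EXP);
		u8 factor = MS(value, HTT_TX_Q_STATE_ENTRY_FACTOR);

		/* calc_size() stores count as roughly factor << (7 + 3 * exp) */
		return (size_t)factor << (7 + 3 * exp);
	}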
47
48static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
49 struct ieee80211_txq *txq)
50{
51 struct ath10k *ar = hw->priv;
52 struct ath10k_sta *arsta = (void *)txq->sta->drv_priv;
53 struct ath10k_vif *arvif = (void *)txq->vif->drv_priv;
54 unsigned long frame_cnt;
55 unsigned long byte_cnt;
56 int idx;
57 u32 bit;
58 u16 peer_id;
59 u8 tid;
60 u8 count;
61
62 lockdep_assert_held(&ar->htt.tx_lock);
63
64 if (!ar->htt.tx_q_state.enabled)
65 return;
66
67 if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
68 return;
69
70 if (txq->sta)
71 peer_id = arsta->peer_id;
72 else
73 peer_id = arvif->peer_id;
74
75 tid = txq->tid;
76 bit = BIT(peer_id % 32);
77 idx = peer_id / 32;
78
79 ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt);
80 count = ath10k_htt_tx_txq_calc_size(byte_cnt);
81
82 if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
83 unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
84 ath10k_warn(ar, "refusing to update txq for peer_id %hu tid %hhu due to out of bounds\n",
85 peer_id, tid);
86 return;
87 }
88
89 ar->htt.tx_q_state.vaddr->count[tid][peer_id] = count;
90 ar->htt.tx_q_state.vaddr->map[tid][idx] &= ~bit;
91 ar->htt.tx_q_state.vaddr->map[tid][idx] |= count ? bit : 0;
92
93 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update peer_id %hu tid %hhu count %hhu\n",
94 peer_id, tid, count);
95}
96
97static void __ath10k_htt_tx_txq_sync(struct ath10k *ar)
98{
99 u32 seq;
100 size_t size;
101
102 lockdep_assert_held(&ar->htt.tx_lock);
103
104 if (!ar->htt.tx_q_state.enabled)
105 return;
106
107 if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
108 return;
109
110 seq = le32_to_cpu(ar->htt.tx_q_state.vaddr->seq);
111 seq++;
112 ar->htt.tx_q_state.vaddr->seq = cpu_to_le32(seq);
113
114 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update commit seq %u\n",
115 seq);
116
117 size = sizeof(*ar->htt.tx_q_state.vaddr);
118 dma_sync_single_for_device(ar->dev,
119 ar->htt.tx_q_state.paddr,
120 size,
121 DMA_TO_DEVICE);
122}
123
124void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
125 struct ieee80211_txq *txq)
126{
127 struct ath10k *ar = hw->priv;
128
129 spin_lock_bh(&ar->htt.tx_lock);
130 __ath10k_htt_tx_txq_recalc(hw, txq);
131 spin_unlock_bh(&ar->htt.tx_lock);
132}
133
134void ath10k_htt_tx_txq_sync(struct ath10k *ar)
135{
136 spin_lock_bh(&ar->htt.tx_lock);
137 __ath10k_htt_tx_txq_sync(ar);
138 spin_unlock_bh(&ar->htt.tx_lock);
139}
140
141void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
142 struct ieee80211_txq *txq)
143{
144 struct ath10k *ar = hw->priv;
145
146 spin_lock_bh(&ar->htt.tx_lock);
147 __ath10k_htt_tx_txq_recalc(hw, txq);
148 __ath10k_htt_tx_txq_sync(ar);
149 spin_unlock_bh(&ar->htt.tx_lock);
150}
151
152void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
153{
154 lockdep_assert_held(&htt->tx_lock);
29 155
30 htt->num_pending_tx--; 156 htt->num_pending_tx--;
31 if (htt->num_pending_tx == htt->max_num_pending_tx - 1) 157 if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
32 ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL); 158 ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
33} 159}
34 160
35static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, 161int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
36 bool limit_mgmt_desc)
37{ 162{
38 spin_lock_bh(&htt->tx_lock); 163 lockdep_assert_held(&htt->tx_lock);
39 __ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc); 164
40 spin_unlock_bh(&htt->tx_lock); 165 if (htt->num_pending_tx >= htt->max_num_pending_tx)
166 return -EBUSY;
167
168 htt->num_pending_tx++;
169 if (htt->num_pending_tx == htt->max_num_pending_tx)
170 ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
171
172 return 0;
41} 173}
42 174
43static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt, 175int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
44 bool limit_mgmt_desc, bool is_probe_resp) 176 bool is_presp)
45{ 177{
46 struct ath10k *ar = htt->ar; 178 struct ath10k *ar = htt->ar;
47 int ret = 0;
48 179
49 spin_lock_bh(&htt->tx_lock); 180 lockdep_assert_held(&htt->tx_lock);
50 181
51 if (htt->num_pending_tx >= htt->max_num_pending_tx) { 182 if (!is_mgmt || !ar->hw_params.max_probe_resp_desc_thres)
52 ret = -EBUSY; 183 return 0;
53 goto exit;
54 }
55 184
56 if (limit_mgmt_desc) { 185 if (is_presp &&
57 if (is_probe_resp && (htt->num_pending_mgmt_tx > 186 ar->hw_params.max_probe_resp_desc_thres < htt->num_pending_mgmt_tx)
58 ar->hw_params.max_probe_resp_desc_thres)) { 187 return -EBUSY;
59 ret = -EBUSY;
60 goto exit;
61 }
62 htt->num_pending_mgmt_tx++;
63 }
64 188
65 htt->num_pending_tx++; 189 htt->num_pending_mgmt_tx++;
66 if (htt->num_pending_tx == htt->max_num_pending_tx)
67 ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
68 190
69exit: 191 return 0;
70 spin_unlock_bh(&htt->tx_lock); 192}
71 return ret; 193
194void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt)
195{
196 lockdep_assert_held(&htt->tx_lock);
197
198 if (!htt->ar->hw_params.max_probe_resp_desc_thres)
199 return;
200
201 htt->num_pending_mgmt_tx--;
72} 202}
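These helpers assume the caller holds htt->tx_lock and pairs them explicitly. A minimal illustrative caller (the real mac.c call site is outside this hunk) could look like:

	/* illustrative pairing of the pending-counter helpers under htt->tx_lock */
	static int example_htt_tx_prepare(struct ath10k_htt *htt, bool is_mgmt,
					  bool is_presp)
	{
		int ret;

		spin_lock_bh(&htt->tx_lock);

		ret = ath10k_htt_tx_inc_pending(htt);
		if (ret)
			goto unlock;

		ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
		if (ret)
			ath10k_htt_tx_dec_pending(htt);	/* roll back on failure */

	unlock:
		spin_unlock_bh(&htt->tx_lock);
		return ret;
	}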
73 203
74int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb) 204int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
@@ -209,8 +339,18 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
209 goto free_frag_desc; 339 goto free_frag_desc;
210 } 340 }
211 341
342 size = roundup_pow_of_two(htt->max_num_pending_tx);
343 ret = kfifo_alloc(&htt->txdone_fifo, size, GFP_KERNEL);
344 if (ret) {
345 ath10k_err(ar, "failed to alloc txdone fifo: %d\n", ret);
346 goto free_txq;
347 }
348
212 return 0; 349 return 0;
213 350
351free_txq:
352 ath10k_htt_tx_free_txq(htt);
353
214free_frag_desc: 354free_frag_desc:
215 ath10k_htt_tx_free_cont_frag_desc(htt); 355 ath10k_htt_tx_free_cont_frag_desc(htt);
216 356
@@ -234,8 +374,8 @@ static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
234 374
235 ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id); 375 ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);
236 376
237 tx_done.discard = 1;
238 tx_done.msdu_id = msdu_id; 377 tx_done.msdu_id = msdu_id;
378 tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
239 379
240 ath10k_txrx_tx_unref(htt, &tx_done); 380 ath10k_txrx_tx_unref(htt, &tx_done);
241 381
@@ -258,6 +398,8 @@ void ath10k_htt_tx_free(struct ath10k_htt *htt)
258 398
259 ath10k_htt_tx_free_txq(htt); 399 ath10k_htt_tx_free_txq(htt);
260 ath10k_htt_tx_free_cont_frag_desc(htt); 400 ath10k_htt_tx_free_cont_frag_desc(htt);
401 WARN_ON(!kfifo_is_empty(&htt->txdone_fifo));
402 kfifo_free(&htt->txdone_fifo);
261} 403}
262 404
263void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb) 405void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
@@ -535,6 +677,55 @@ int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
535 return 0; 677 return 0;
536} 678}
537 679
680int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
681 __le32 token,
682 __le16 fetch_seq_num,
683 struct htt_tx_fetch_record *records,
684 size_t num_records)
685{
686 struct sk_buff *skb;
687 struct htt_cmd *cmd;
688 const u16 resp_id = 0;
689 int len = 0;
690 int ret;
691
692 /* Response IDs are echoed back only for host driver convenience
693 * purposes. They aren't used for anything in the driver yet so use 0.
694 */
695
696 len += sizeof(cmd->hdr);
697 len += sizeof(cmd->tx_fetch_resp);
698 len += sizeof(cmd->tx_fetch_resp.records[0]) * num_records;
699
700 skb = ath10k_htc_alloc_skb(ar, len);
701 if (!skb)
702 return -ENOMEM;
703
704 skb_put(skb, len);
705 cmd = (struct htt_cmd *)skb->data;
706 cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FETCH_RESP;
707 cmd->tx_fetch_resp.resp_id = cpu_to_le16(resp_id);
708 cmd->tx_fetch_resp.fetch_seq_num = fetch_seq_num;
709 cmd->tx_fetch_resp.num_records = cpu_to_le16(num_records);
710 cmd->tx_fetch_resp.token = token;
711
712 memcpy(cmd->tx_fetch_resp.records, records,
713 sizeof(records[0]) * num_records);
714
715 ret = ath10k_htc_send(&ar->htc, ar->htt.eid, skb);
716 if (ret) {
717 ath10k_warn(ar, "failed to submit htc command: %d\n", ret);
718 goto err_free_skb;
719 }
720
721 return 0;
722
723err_free_skb:
724 dev_kfree_skb_any(skb);
725
726 return ret;
727}
728
538static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb) 729static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb)
539{ 730{
540 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 731 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -576,20 +767,6 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
576 int msdu_id = -1; 767 int msdu_id = -1;
577 int res; 768 int res;
578 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; 769 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
579 bool limit_mgmt_desc = false;
580 bool is_probe_resp = false;
581
582 if (ar->hw_params.max_probe_resp_desc_thres) {
583 limit_mgmt_desc = true;
584
585 if (ieee80211_is_probe_resp(hdr->frame_control))
586 is_probe_resp = true;
587 }
588
589 res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);
590
591 if (res)
592 goto err;
593 770
594 len += sizeof(cmd->hdr); 771 len += sizeof(cmd->hdr);
595 len += sizeof(cmd->mgmt_tx); 772 len += sizeof(cmd->mgmt_tx);
@@ -598,7 +775,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
598 res = ath10k_htt_tx_alloc_msdu_id(htt, msdu); 775 res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
599 spin_unlock_bh(&htt->tx_lock); 776 spin_unlock_bh(&htt->tx_lock);
600 if (res < 0) 777 if (res < 0)
601 goto err_tx_dec; 778 goto err;
602 779
603 msdu_id = res; 780 msdu_id = res;
604 781
@@ -649,8 +826,6 @@ err_free_msdu_id:
649 spin_lock_bh(&htt->tx_lock); 826 spin_lock_bh(&htt->tx_lock);
650 ath10k_htt_tx_free_msdu_id(htt, msdu_id); 827 ath10k_htt_tx_free_msdu_id(htt, msdu_id);
651 spin_unlock_bh(&htt->tx_lock); 828 spin_unlock_bh(&htt->tx_lock);
652err_tx_dec:
653 ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
654err: 829err:
655 return res; 830 return res;
656} 831}
@@ -677,26 +852,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
677 u32 frags_paddr = 0; 852 u32 frags_paddr = 0;
678 u32 txbuf_paddr; 853 u32 txbuf_paddr;
679 struct htt_msdu_ext_desc *ext_desc = NULL; 854 struct htt_msdu_ext_desc *ext_desc = NULL;
680 bool limit_mgmt_desc = false;
681 bool is_probe_resp = false;
682
683 if (unlikely(ieee80211_is_mgmt(hdr->frame_control)) &&
684 ar->hw_params.max_probe_resp_desc_thres) {
685 limit_mgmt_desc = true;
686
687 if (ieee80211_is_probe_resp(hdr->frame_control))
688 is_probe_resp = true;
689 }
690
691 res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);
692 if (res)
693 goto err;
694 855
695 spin_lock_bh(&htt->tx_lock); 856 spin_lock_bh(&htt->tx_lock);
696 res = ath10k_htt_tx_alloc_msdu_id(htt, msdu); 857 res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
697 spin_unlock_bh(&htt->tx_lock); 858 spin_unlock_bh(&htt->tx_lock);
698 if (res < 0) 859 if (res < 0)
699 goto err_tx_dec; 860 goto err;
700 861
701 msdu_id = res; 862 msdu_id = res;
702 863
@@ -862,11 +1023,7 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
862err_unmap_msdu: 1023err_unmap_msdu:
863 dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); 1024 dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
864err_free_msdu_id: 1025err_free_msdu_id:
865 spin_lock_bh(&htt->tx_lock);
866 ath10k_htt_tx_free_msdu_id(htt, msdu_id); 1026 ath10k_htt_tx_free_msdu_id(htt, msdu_id);
867 spin_unlock_bh(&htt->tx_lock);
868err_tx_dec:
869 ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
870err: 1027err:
871 return res; 1028 return res;
872} 1029}
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index f0cfbc745c97..c0179bc4af29 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -134,8 +134,6 @@ enum qca9377_chip_id_rev {
134 134
135#define REG_DUMP_COUNT_QCA988X 60 135#define REG_DUMP_COUNT_QCA988X 60
136 136
137#define QCA988X_CAL_DATA_LEN 2116
138
139struct ath10k_fw_ie { 137struct ath10k_fw_ie {
140 __le32 id; 138 __le32 id;
141 __le32 len; 139 __le32 len;
@@ -431,10 +429,14 @@ enum ath10k_hw_4addr_pad {
431#define TARGET_10_4_ACTIVE_PEERS 0 429#define TARGET_10_4_ACTIVE_PEERS 0
432 430
433#define TARGET_10_4_NUM_QCACHE_PEERS_MAX 512 431#define TARGET_10_4_NUM_QCACHE_PEERS_MAX 512
432#define TARGET_10_4_QCACHE_ACTIVE_PEERS 50
433#define TARGET_10_4_QCACHE_ACTIVE_PEERS_PFC 35
434#define TARGET_10_4_NUM_OFFLOAD_PEERS 0 434#define TARGET_10_4_NUM_OFFLOAD_PEERS 0
435#define TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS 0 435#define TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS 0
436#define TARGET_10_4_NUM_PEER_KEYS 2 436#define TARGET_10_4_NUM_PEER_KEYS 2
437#define TARGET_10_4_TGT_NUM_TIDS ((TARGET_10_4_NUM_PEERS) * 2) 437#define TARGET_10_4_TGT_NUM_TIDS ((TARGET_10_4_NUM_PEERS) * 2)
438#define TARGET_10_4_NUM_MSDU_DESC (1024 + 400)
439#define TARGET_10_4_NUM_MSDU_DESC_PFC 2500
438#define TARGET_10_4_AST_SKID_LIMIT 32 440#define TARGET_10_4_AST_SKID_LIMIT 32
439 441
440/* 100 ms for video, best-effort, and background */ 442/* 100 ms for video, best-effort, and background */
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 78999c9de23b..6ace10bc96f5 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -482,7 +482,7 @@ chan_to_phymode(const struct cfg80211_chan_def *chandef)
482 enum wmi_phy_mode phymode = MODE_UNKNOWN; 482 enum wmi_phy_mode phymode = MODE_UNKNOWN;
483 483
484 switch (chandef->chan->band) { 484 switch (chandef->chan->band) {
485 case IEEE80211_BAND_2GHZ: 485 case NL80211_BAND_2GHZ:
486 switch (chandef->width) { 486 switch (chandef->width) {
487 case NL80211_CHAN_WIDTH_20_NOHT: 487 case NL80211_CHAN_WIDTH_20_NOHT:
488 if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM) 488 if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM)
@@ -505,7 +505,7 @@ chan_to_phymode(const struct cfg80211_chan_def *chandef)
505 break; 505 break;
506 } 506 }
507 break; 507 break;
508 case IEEE80211_BAND_5GHZ: 508 case NL80211_BAND_5GHZ:
509 switch (chandef->width) { 509 switch (chandef->width) {
510 case NL80211_CHAN_WIDTH_20_NOHT: 510 case NL80211_CHAN_WIDTH_20_NOHT:
511 phymode = MODE_11A; 511 phymode = MODE_11A;
@@ -618,10 +618,15 @@ ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
618 *def = &conf->def; 618 *def = &conf->def;
619} 619}
620 620
621static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr, 621static int ath10k_peer_create(struct ath10k *ar,
622 struct ieee80211_vif *vif,
623 struct ieee80211_sta *sta,
624 u32 vdev_id,
625 const u8 *addr,
622 enum wmi_peer_type peer_type) 626 enum wmi_peer_type peer_type)
623{ 627{
624 struct ath10k_vif *arvif; 628 struct ath10k_vif *arvif;
629 struct ath10k_peer *peer;
625 int num_peers = 0; 630 int num_peers = 0;
626 int ret; 631 int ret;
627 632
@@ -650,6 +655,22 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr,
650 return ret; 655 return ret;
651 } 656 }
652 657
658 spin_lock_bh(&ar->data_lock);
659
660 peer = ath10k_peer_find(ar, vdev_id, addr);
661 if (!peer) {
662 ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n",
663 addr, vdev_id);
664 ath10k_wmi_peer_delete(ar, vdev_id, addr);
665 spin_unlock_bh(&ar->data_lock);
666 return -ENOENT;
667 }
668
669 peer->vif = vif;
670 peer->sta = sta;
671
672 spin_unlock_bh(&ar->data_lock);
673
653 ar->num_peers++; 674 ar->num_peers++;
654 675
655 return 0; 676 return 0;
@@ -731,6 +752,7 @@ static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
731static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id) 752static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
732{ 753{
733 struct ath10k_peer *peer, *tmp; 754 struct ath10k_peer *peer, *tmp;
755 int peer_id;
734 756
735 lockdep_assert_held(&ar->conf_mutex); 757 lockdep_assert_held(&ar->conf_mutex);
736 758
@@ -742,6 +764,11 @@ static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
742 ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n", 764 ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n",
743 peer->addr, vdev_id); 765 peer->addr, vdev_id);
744 766
767 for_each_set_bit(peer_id, peer->peer_ids,
768 ATH10K_MAX_NUM_PEER_IDS) {
769 ar->peer_map[peer_id] = NULL;
770 }
771
745 list_del(&peer->list); 772 list_del(&peer->list);
746 kfree(peer); 773 kfree(peer);
747 ar->num_peers--; 774 ar->num_peers--;
@@ -2028,7 +2055,7 @@ static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
2028 struct cfg80211_chan_def def; 2055 struct cfg80211_chan_def def;
2029 const struct ieee80211_supported_band *sband; 2056 const struct ieee80211_supported_band *sband;
2030 const struct ieee80211_rate *rates; 2057 const struct ieee80211_rate *rates;
2031 enum ieee80211_band band; 2058 enum nl80211_band band;
2032 u32 ratemask; 2059 u32 ratemask;
2033 u8 rate; 2060 u8 rate;
2034 int i; 2061 int i;
@@ -2088,7 +2115,7 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
2088 const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; 2115 const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2089 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 2116 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2090 struct cfg80211_chan_def def; 2117 struct cfg80211_chan_def def;
2091 enum ieee80211_band band; 2118 enum nl80211_band band;
2092 const u8 *ht_mcs_mask; 2119 const u8 *ht_mcs_mask;
2093 const u16 *vht_mcs_mask; 2120 const u16 *vht_mcs_mask;
2094 int i, n; 2121 int i, n;
@@ -2312,7 +2339,7 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
2312 const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; 2339 const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
2313 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 2340 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2314 struct cfg80211_chan_def def; 2341 struct cfg80211_chan_def def;
2315 enum ieee80211_band band; 2342 enum nl80211_band band;
2316 const u16 *vht_mcs_mask; 2343 const u16 *vht_mcs_mask;
2317 u8 ampdu_factor; 2344 u8 ampdu_factor;
2318 2345
@@ -2330,7 +2357,7 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
2330 2357
2331 arg->peer_flags |= ar->wmi.peer_flags->vht; 2358 arg->peer_flags |= ar->wmi.peer_flags->vht;
2332 2359
2333 if (def.chan->band == IEEE80211_BAND_2GHZ) 2360 if (def.chan->band == NL80211_BAND_2GHZ)
2334 arg->peer_flags |= ar->wmi.peer_flags->vht_2g; 2361 arg->peer_flags |= ar->wmi.peer_flags->vht_2g;
2335 2362
2336 arg->peer_vht_caps = vht_cap->cap; 2363 arg->peer_vht_caps = vht_cap->cap;
@@ -2399,7 +2426,7 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
2399 2426
2400static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta) 2427static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
2401{ 2428{
2402 return sta->supp_rates[IEEE80211_BAND_2GHZ] >> 2429 return sta->supp_rates[NL80211_BAND_2GHZ] >>
2403 ATH10K_MAC_FIRST_OFDM_RATE_IDX; 2430 ATH10K_MAC_FIRST_OFDM_RATE_IDX;
2404} 2431}
2405 2432
@@ -2410,7 +2437,7 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
2410{ 2437{
2411 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 2438 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2412 struct cfg80211_chan_def def; 2439 struct cfg80211_chan_def def;
2413 enum ieee80211_band band; 2440 enum nl80211_band band;
2414 const u8 *ht_mcs_mask; 2441 const u8 *ht_mcs_mask;
2415 const u16 *vht_mcs_mask; 2442 const u16 *vht_mcs_mask;
2416 enum wmi_phy_mode phymode = MODE_UNKNOWN; 2443 enum wmi_phy_mode phymode = MODE_UNKNOWN;
@@ -2423,7 +2450,7 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
2423 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 2450 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
2424 2451
2425 switch (band) { 2452 switch (band) {
2426 case IEEE80211_BAND_2GHZ: 2453 case NL80211_BAND_2GHZ:
2427 if (sta->vht_cap.vht_supported && 2454 if (sta->vht_cap.vht_supported &&
2428 !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) { 2455 !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
2429 if (sta->bandwidth == IEEE80211_STA_RX_BW_40) 2456 if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
@@ -2443,7 +2470,7 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
2443 } 2470 }
2444 2471
2445 break; 2472 break;
2446 case IEEE80211_BAND_5GHZ: 2473 case NL80211_BAND_5GHZ:
2447 /* 2474 /*
2448 * Check VHT first. 2475 * Check VHT first.
2449 */ 2476 */
@@ -2821,7 +2848,7 @@ static int ath10k_update_channel_list(struct ath10k *ar)
2821{ 2848{
2822 struct ieee80211_hw *hw = ar->hw; 2849 struct ieee80211_hw *hw = ar->hw;
2823 struct ieee80211_supported_band **bands; 2850 struct ieee80211_supported_band **bands;
2824 enum ieee80211_band band; 2851 enum nl80211_band band;
2825 struct ieee80211_channel *channel; 2852 struct ieee80211_channel *channel;
2826 struct wmi_scan_chan_list_arg arg = {0}; 2853 struct wmi_scan_chan_list_arg arg = {0};
2827 struct wmi_channel_arg *ch; 2854 struct wmi_channel_arg *ch;
@@ -2833,7 +2860,7 @@ static int ath10k_update_channel_list(struct ath10k *ar)
2833 lockdep_assert_held(&ar->conf_mutex); 2860 lockdep_assert_held(&ar->conf_mutex);
2834 2861
2835 bands = hw->wiphy->bands; 2862 bands = hw->wiphy->bands;
2836 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 2863 for (band = 0; band < NUM_NL80211_BANDS; band++) {
2837 if (!bands[band]) 2864 if (!bands[band])
2838 continue; 2865 continue;
2839 2866
@@ -2852,7 +2879,7 @@ static int ath10k_update_channel_list(struct ath10k *ar)
2852 return -ENOMEM; 2879 return -ENOMEM;
2853 2880
2854 ch = arg.channels; 2881 ch = arg.channels;
2855 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 2882 for (band = 0; band < NUM_NL80211_BANDS; band++) {
2856 if (!bands[band]) 2883 if (!bands[band])
2857 continue; 2884 continue;
2858 2885
@@ -2890,7 +2917,7 @@ static int ath10k_update_channel_list(struct ath10k *ar)
2890 /* FIXME: why use only legacy modes, why not any 2917 /* FIXME: why use only legacy modes, why not any
2891 * HT/VHT modes? Would that even make any 2918 * HT/VHT modes? Would that even make any
2892 * difference? */ 2919 * difference? */
2893 if (channel->band == IEEE80211_BAND_2GHZ) 2920 if (channel->band == NL80211_BAND_2GHZ)
2894 ch->mode = MODE_11G; 2921 ch->mode = MODE_11G;
2895 else 2922 else
2896 ch->mode = MODE_11A; 2923 ch->mode = MODE_11A;
@@ -2994,6 +3021,13 @@ static void ath10k_reg_notifier(struct wiphy *wiphy,
2994/* TX handlers */ 3021/* TX handlers */
2995/***************/ 3022/***************/
2996 3023
3024enum ath10k_mac_tx_path {
3025 ATH10K_MAC_TX_HTT,
3026 ATH10K_MAC_TX_HTT_MGMT,
3027 ATH10K_MAC_TX_WMI_MGMT,
3028 ATH10K_MAC_TX_UNKNOWN,
3029};
3030
2997void ath10k_mac_tx_lock(struct ath10k *ar, int reason) 3031void ath10k_mac_tx_lock(struct ath10k *ar, int reason)
2998{ 3032{
2999 lockdep_assert_held(&ar->htt.tx_lock); 3033 lockdep_assert_held(&ar->htt.tx_lock);
@@ -3271,6 +3305,28 @@ static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
3271 } 3305 }
3272} 3306}
3273 3307
3308static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar,
3309 struct ieee80211_vif *vif,
3310 struct ieee80211_txq *txq,
3311 struct sk_buff *skb)
3312{
3313 struct ieee80211_hdr *hdr = (void *)skb->data;
3314 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
3315
3316 cb->flags = 0;
3317 if (!ath10k_tx_h_use_hwcrypto(vif, skb))
3318 cb->flags |= ATH10K_SKB_F_NO_HWCRYPT;
3319
3320 if (ieee80211_is_mgmt(hdr->frame_control))
3321 cb->flags |= ATH10K_SKB_F_MGMT;
3322
3323 if (ieee80211_is_data_qos(hdr->frame_control))
3324 cb->flags |= ATH10K_SKB_F_QOS;
3325
3326 cb->vif = vif;
3327 cb->txq = txq;
3328}
3329
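The new ath10k_mac_tx_h_fill_cb() above centralizes per-skb control-block setup (hw-crypto, management and QoS flags plus the vif/txq back-pointers) so the regular tx path and the new push path fill it identically. Below is a minimal standalone sketch of that flag-filling idea; every name in it (skb_meta, fill_meta, FLAG_*) is invented for illustration, and the frame_control checks are simplified stand-ins for the mac80211 helpers, not the driver's actual code.

/* Illustrative sketch only -- the names below are not ath10k symbols. */
#include <stdint.h>
#include <stdio.h>

#define FLAG_NO_HWCRYPT (1u << 0)
#define FLAG_MGMT       (1u << 1)
#define FLAG_QOS        (1u << 2)

struct skb_meta {               /* stands in for ath10k_skb_cb */
        uint32_t flags;
        void *vif;
        void *txq;
};

/* fc is the host-order 802.11 Frame Control field */
static int is_mgmt(uint16_t fc)     { return (fc & 0x000c) == 0x0000; }
static int is_data_qos(uint16_t fc) { return (fc & 0x008c) == 0x0088; }

static void fill_meta(struct skb_meta *m, void *vif, void *txq,
                      uint16_t fc, int use_hwcrypto)
{
        m->flags = 0;
        if (!use_hwcrypto)
                m->flags |= FLAG_NO_HWCRYPT;
        if (is_mgmt(fc))
                m->flags |= FLAG_MGMT;
        if (is_data_qos(fc))
                m->flags |= FLAG_QOS;
        m->vif = vif;
        m->txq = txq;
}

int main(void)
{
        struct skb_meta m;

        fill_meta(&m, NULL, NULL, 0x0088 /* QoS data */, 1);
        printf("flags=0x%x\n", (unsigned)m.flags);      /* prints flags=0x4 */
        return 0;
}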
3274bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar) 3330bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar)
3275{ 3331{
3276 /* FIXME: Not really sure since when the behaviour changed. At some 3332 /* FIXME: Not really sure since when the behaviour changed. At some
@@ -3306,26 +3362,50 @@ unlock:
3306 return ret; 3362 return ret;
3307} 3363}
3308 3364
3309static void ath10k_mac_tx(struct ath10k *ar, enum ath10k_hw_txrx_mode txmode, 3365static enum ath10k_mac_tx_path
3310 struct sk_buff *skb) 3366ath10k_mac_tx_h_get_txpath(struct ath10k *ar,
3367 struct sk_buff *skb,
3368 enum ath10k_hw_txrx_mode txmode)
3311{ 3369{
3312 struct ath10k_htt *htt = &ar->htt;
3313 int ret = 0;
3314
3315 switch (txmode) { 3370 switch (txmode) {
3316 case ATH10K_HW_TXRX_RAW: 3371 case ATH10K_HW_TXRX_RAW:
3317 case ATH10K_HW_TXRX_NATIVE_WIFI: 3372 case ATH10K_HW_TXRX_NATIVE_WIFI:
3318 case ATH10K_HW_TXRX_ETHERNET: 3373 case ATH10K_HW_TXRX_ETHERNET:
3319 ret = ath10k_htt_tx(htt, txmode, skb); 3374 return ATH10K_MAC_TX_HTT;
3320 break;
3321 case ATH10K_HW_TXRX_MGMT: 3375 case ATH10K_HW_TXRX_MGMT:
3322 if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, 3376 if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
3323 ar->fw_features)) 3377 ar->fw_features))
3324 ret = ath10k_mac_tx_wmi_mgmt(ar, skb); 3378 return ATH10K_MAC_TX_WMI_MGMT;
3325 else if (ar->htt.target_version_major >= 3) 3379 else if (ar->htt.target_version_major >= 3)
3326 ret = ath10k_htt_tx(htt, txmode, skb); 3380 return ATH10K_MAC_TX_HTT;
3327 else 3381 else
3328 ret = ath10k_htt_mgmt_tx(htt, skb); 3382 return ATH10K_MAC_TX_HTT_MGMT;
3383 }
3384
3385 return ATH10K_MAC_TX_UNKNOWN;
3386}
3387
3388static int ath10k_mac_tx_submit(struct ath10k *ar,
3389 enum ath10k_hw_txrx_mode txmode,
3390 enum ath10k_mac_tx_path txpath,
3391 struct sk_buff *skb)
3392{
3393 struct ath10k_htt *htt = &ar->htt;
3394 int ret = -EINVAL;
3395
3396 switch (txpath) {
3397 case ATH10K_MAC_TX_HTT:
3398 ret = ath10k_htt_tx(htt, txmode, skb);
3399 break;
3400 case ATH10K_MAC_TX_HTT_MGMT:
3401 ret = ath10k_htt_mgmt_tx(htt, skb);
3402 break;
3403 case ATH10K_MAC_TX_WMI_MGMT:
3404 ret = ath10k_mac_tx_wmi_mgmt(ar, skb);
3405 break;
3406 case ATH10K_MAC_TX_UNKNOWN:
3407 WARN_ON_ONCE(1);
3408 ret = -EINVAL;
3329 break; 3409 break;
3330 } 3410 }
3331 3411
@@ -3334,6 +3414,64 @@ static void ath10k_mac_tx(struct ath10k *ar, enum ath10k_hw_txrx_mode txmode,
3334 ret); 3414 ret);
3335 ieee80211_free_txskb(ar->hw, skb); 3415 ieee80211_free_txskb(ar->hw, skb);
3336 } 3416 }
3417
3418 return ret;
3419}
3420
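The two helpers above replace the old ath10k_mac_tx(), which both chose a firmware interface and submitted the frame: ath10k_mac_tx_h_get_txpath() is now a pure decision and ath10k_mac_tx_submit() only dispatches. A compact sketch of the same decide-then-submit split follows; the enum values and stub functions are illustrative, not the driver's symbols.

/* Sketch of the decide-then-submit split; all names are invented. */
#include <stdio.h>

enum txmode { MODE_RAW, MODE_NWIFI, MODE_ETH, MODE_MGMT };
enum txpath { PATH_HTT, PATH_HTT_MGMT, PATH_WMI_MGMT, PATH_UNKNOWN };

/* Pure decision: no side effects, easy to unit-test. */
static enum txpath get_txpath(enum txmode mode, int fw_has_wmi_mgmt,
                              int htt_major)
{
        switch (mode) {
        case MODE_RAW:
        case MODE_NWIFI:
        case MODE_ETH:
                return PATH_HTT;
        case MODE_MGMT:
                if (fw_has_wmi_mgmt)
                        return PATH_WMI_MGMT;
                else if (htt_major >= 3)
                        return PATH_HTT;
                return PATH_HTT_MGMT;
        }
        return PATH_UNKNOWN;
}

/* Dispatch only; each branch stands in for the real submit call. */
static int submit(enum txpath path)
{
        switch (path) {
        case PATH_HTT:      printf("htt data tx\n"); return 0;
        case PATH_HTT_MGMT: printf("htt mgmt tx\n"); return 0;
        case PATH_WMI_MGMT: printf("wmi mgmt tx\n"); return 0;
        default:            return -1;
        }
}

int main(void)
{
        return submit(get_txpath(MODE_MGMT, 0, 3));     /* -> "htt data tx" */
}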
3421/* This function consumes the sk_buff regardless of return value as far as
3422 * caller is concerned so no freeing is necessary afterwards.
3423 */
3424static int ath10k_mac_tx(struct ath10k *ar,
3425 struct ieee80211_vif *vif,
3426 struct ieee80211_sta *sta,
3427 enum ath10k_hw_txrx_mode txmode,
3428 enum ath10k_mac_tx_path txpath,
3429 struct sk_buff *skb)
3430{
3431 struct ieee80211_hw *hw = ar->hw;
3432 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
3433 int ret;
3434
3435 /* We should disable CCK RATE due to P2P */
3436 if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
3437 ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
3438
3439 switch (txmode) {
3440 case ATH10K_HW_TXRX_MGMT:
3441 case ATH10K_HW_TXRX_NATIVE_WIFI:
3442 ath10k_tx_h_nwifi(hw, skb);
3443 ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
3444 ath10k_tx_h_seq_no(vif, skb);
3445 break;
3446 case ATH10K_HW_TXRX_ETHERNET:
3447 ath10k_tx_h_8023(skb);
3448 break;
3449 case ATH10K_HW_TXRX_RAW:
3450 if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
3451 WARN_ON_ONCE(1);
3452 ieee80211_free_txskb(hw, skb);
3453 return -ENOTSUPP;
3454 }
3455 }
3456
3457 if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
3458 if (!ath10k_mac_tx_frm_has_freq(ar)) {
3459 ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n",
3460 skb);
3461
3462 skb_queue_tail(&ar->offchan_tx_queue, skb);
3463 ieee80211_queue_work(hw, &ar->offchan_tx_work);
3464 return 0;
3465 }
3466 }
3467
3468 ret = ath10k_mac_tx_submit(ar, txmode, txpath, skb);
3469 if (ret) {
3470 ath10k_warn(ar, "failed to submit frame: %d\n", ret);
3471 return ret;
3472 }
3473
3474 return 0;
3337} 3475}
3338 3476
3339void ath10k_offchan_tx_purge(struct ath10k *ar) 3477void ath10k_offchan_tx_purge(struct ath10k *ar)
@@ -3354,12 +3492,13 @@ void ath10k_offchan_tx_work(struct work_struct *work)
3354 struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work); 3492 struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work);
3355 struct ath10k_peer *peer; 3493 struct ath10k_peer *peer;
3356 struct ath10k_vif *arvif; 3494 struct ath10k_vif *arvif;
3495 enum ath10k_hw_txrx_mode txmode;
3496 enum ath10k_mac_tx_path txpath;
3357 struct ieee80211_hdr *hdr; 3497 struct ieee80211_hdr *hdr;
3358 struct ieee80211_vif *vif; 3498 struct ieee80211_vif *vif;
3359 struct ieee80211_sta *sta; 3499 struct ieee80211_sta *sta;
3360 struct sk_buff *skb; 3500 struct sk_buff *skb;
3361 const u8 *peer_addr; 3501 const u8 *peer_addr;
3362 enum ath10k_hw_txrx_mode txmode;
3363 int vdev_id; 3502 int vdev_id;
3364 int ret; 3503 int ret;
3365 unsigned long time_left; 3504 unsigned long time_left;
@@ -3396,7 +3535,8 @@ void ath10k_offchan_tx_work(struct work_struct *work)
3396 peer_addr, vdev_id); 3535 peer_addr, vdev_id);
3397 3536
3398 if (!peer) { 3537 if (!peer) {
3399 ret = ath10k_peer_create(ar, vdev_id, peer_addr, 3538 ret = ath10k_peer_create(ar, NULL, NULL, vdev_id,
3539 peer_addr,
3400 WMI_PEER_TYPE_DEFAULT); 3540 WMI_PEER_TYPE_DEFAULT);
3401 if (ret) 3541 if (ret)
3402 ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n", 3542 ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
@@ -3423,8 +3563,14 @@ void ath10k_offchan_tx_work(struct work_struct *work)
3423 } 3563 }
3424 3564
3425 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); 3565 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
3566 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
3426 3567
3427 ath10k_mac_tx(ar, txmode, skb); 3568 ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
3569 if (ret) {
3570 ath10k_warn(ar, "failed to transmit offchannel frame: %d\n",
3571 ret);
3572 /* not serious */
3573 }
3428 3574
3429 time_left = 3575 time_left =
3430 wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ); 3576 wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ);
@@ -3476,6 +3622,175 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
3476 } 3622 }
3477} 3623}
3478 3624
3625static void ath10k_mac_txq_init(struct ieee80211_txq *txq)
3626{
3627 struct ath10k_txq *artxq = (void *)txq->drv_priv;
3628
3629 if (!txq)
3630 return;
3631
3632 INIT_LIST_HEAD(&artxq->list);
3633}
3634
3635static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq)
3636{
3637 struct ath10k_txq *artxq = (void *)txq->drv_priv;
3638 struct ath10k_skb_cb *cb;
3639 struct sk_buff *msdu;
3640 int msdu_id;
3641
3642 if (!txq)
3643 return;
3644
3645 spin_lock_bh(&ar->txqs_lock);
3646 if (!list_empty(&artxq->list))
3647 list_del_init(&artxq->list);
3648 spin_unlock_bh(&ar->txqs_lock);
3649
3650 spin_lock_bh(&ar->htt.tx_lock);
3651 idr_for_each_entry(&ar->htt.pending_tx, msdu, msdu_id) {
3652 cb = ATH10K_SKB_CB(msdu);
3653 if (cb->txq == txq)
3654 cb->txq = NULL;
3655 }
3656 spin_unlock_bh(&ar->htt.tx_lock);
3657}
3658
3659struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
3660 u16 peer_id,
3661 u8 tid)
3662{
3663 struct ath10k_peer *peer;
3664
3665 lockdep_assert_held(&ar->data_lock);
3666
3667 peer = ar->peer_map[peer_id];
3668 if (!peer)
3669 return NULL;
3670
3671 if (peer->sta)
3672 return peer->sta->txq[tid];
3673 else if (peer->vif)
3674 return peer->vif->txq;
3675 else
3676 return NULL;
3677}
3678
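ath10k_mac_txq_lookup() above resolves a firmware peer_id/tid pair back to a mac80211 software queue, preferring the station's per-TID queue and falling back to the vif queue. The sketch below shows the same table lookup in isolation; the struct names and array sizes are invented for the example.

/* Illustrative peer_id/tid -> queue lookup; not real driver structures. */
#include <stddef.h>

#define NUM_TIDS 16

struct queue { int dummy; };

struct station { struct queue *txq[NUM_TIDS]; };

struct peer {
        struct station *sta;    /* set for station peers */
        struct queue *vif_txq;  /* fallback for self/AP peers */
};

static struct peer *peer_map[32];       /* indexed by firmware peer_id */

static struct queue *txq_lookup(unsigned int peer_id, unsigned int tid)
{
        struct peer *peer;

        if (peer_id >= 32 || tid >= NUM_TIDS)
                return NULL;

        peer = peer_map[peer_id];
        if (!peer)
                return NULL;

        if (peer->sta)
                return peer->sta->txq[tid];     /* per-station, per-TID queue */
        return peer->vif_txq;                   /* vif-level queue otherwise */
}

int main(void)
{
        static struct queue viftxq;
        static struct peer p = { .sta = NULL, .vif_txq = &viftxq };

        peer_map[3] = &p;
        return txq_lookup(3, 0) == &viftxq ? 0 : 1;
}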
3679static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw,
3680 struct ieee80211_txq *txq)
3681{
3682 struct ath10k *ar = hw->priv;
3683 struct ath10k_txq *artxq = (void *)txq->drv_priv;
3684
3685 /* No need to get locks */
3686
3687 if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH)
3688 return true;
3689
3690 if (ar->htt.num_pending_tx < ar->htt.tx_q_state.num_push_allowed)
3691 return true;
3692
3693 if (artxq->num_fw_queued < artxq->num_push_allowed)
3694 return true;
3695
3696 return false;
3697}
3698
3699int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
3700 struct ieee80211_txq *txq)
3701{
3702 struct ath10k *ar = hw->priv;
3703 struct ath10k_htt *htt = &ar->htt;
3704 struct ath10k_txq *artxq = (void *)txq->drv_priv;
3705 struct ieee80211_vif *vif = txq->vif;
3706 struct ieee80211_sta *sta = txq->sta;
3707 enum ath10k_hw_txrx_mode txmode;
3708 enum ath10k_mac_tx_path txpath;
3709 struct sk_buff *skb;
3710 size_t skb_len;
3711 int ret;
3712
3713 spin_lock_bh(&ar->htt.tx_lock);
3714 ret = ath10k_htt_tx_inc_pending(htt);
3715 spin_unlock_bh(&ar->htt.tx_lock);
3716
3717 if (ret)
3718 return ret;
3719
3720 skb = ieee80211_tx_dequeue(hw, txq);
3721 if (!skb) {
3722 spin_lock_bh(&ar->htt.tx_lock);
3723 ath10k_htt_tx_dec_pending(htt);
3724 spin_unlock_bh(&ar->htt.tx_lock);
3725
3726 return -ENOENT;
3727 }
3728
3729 ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb);
3730
3731 skb_len = skb->len;
3732 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
3733 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
3734
3735 ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
3736 if (unlikely(ret)) {
3737 ath10k_warn(ar, "failed to push frame: %d\n", ret);
3738
3739 spin_lock_bh(&ar->htt.tx_lock);
3740 ath10k_htt_tx_dec_pending(htt);
3741 spin_unlock_bh(&ar->htt.tx_lock);
3742
3743 return ret;
3744 }
3745
3746 spin_lock_bh(&ar->htt.tx_lock);
3747 artxq->num_fw_queued++;
3748 spin_unlock_bh(&ar->htt.tx_lock);
3749
3750 return skb_len;
3751}
3752
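ath10k_mac_tx_push_txq() reserves a tx slot before dequeuing and releases it again if the queue is empty or the submit fails, so the pending counter cannot leak; on success it bumps num_fw_queued and reports the frame length. A small sketch of that reserve/dequeue/submit/rollback pattern, with invented counters and callbacks:

/* Reserve -> dequeue -> submit -> rollback-on-error; illustrative only. */
#include <stdio.h>

static int num_pending;
static const int max_pending = 4;

static int inc_pending(void)
{
        if (num_pending >= max_pending)
                return -16;             /* -EBUSY */
        num_pending++;
        return 0;
}

static void dec_pending(void) { num_pending--; }

/* Returns frame length on success, negative error otherwise. */
static int push_one(int (*dequeue)(void), int (*submit)(int len))
{
        int len, ret;

        ret = inc_pending();            /* reserve a slot first */
        if (ret)
                return ret;

        len = dequeue();
        if (len <= 0) {                 /* nothing queued: release the slot */
                dec_pending();
                return -2;              /* -ENOENT */
        }

        ret = submit(len);
        if (ret) {                      /* submit failed: release the slot */
                dec_pending();
                return ret;
        }
        return len;
}

static int fake_dequeue(void)   { return 128; }
static int fake_submit(int len) { (void)len; return 0; }

int main(void)
{
        printf("pushed %d bytes, pending %d\n",
               push_one(fake_dequeue, fake_submit), num_pending);
        return 0;
}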
3753void ath10k_mac_tx_push_pending(struct ath10k *ar)
3754{
3755 struct ieee80211_hw *hw = ar->hw;
3756 struct ieee80211_txq *txq;
3757 struct ath10k_txq *artxq;
3758 struct ath10k_txq *last;
3759 int ret;
3760 int max;
3761
3762 spin_lock_bh(&ar->txqs_lock);
3763 rcu_read_lock();
3764
3765 last = list_last_entry(&ar->txqs, struct ath10k_txq, list);
3766 while (!list_empty(&ar->txqs)) {
3767 artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
3768 txq = container_of((void *)artxq, struct ieee80211_txq,
3769 drv_priv);
3770
3771 /* Prevent aggressive sta/tid taking over tx queue */
3772 max = 16;
3773 ret = 0;
3774 while (ath10k_mac_tx_can_push(hw, txq) && max--) {
3775 ret = ath10k_mac_tx_push_txq(hw, txq);
3776 if (ret < 0)
3777 break;
3778 }
3779
3780 list_del_init(&artxq->list);
3781 if (ret != -ENOENT)
3782 list_add_tail(&artxq->list, &ar->txqs);
3783
3784 ath10k_htt_tx_txq_update(hw, txq);
3785
3786 if (artxq == last || (ret < 0 && ret != -ENOENT))
3787 break;
3788 }
3789
3790 rcu_read_unlock();
3791 spin_unlock_bh(&ar->txqs_lock);
3792}
3793
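ath10k_mac_tx_push_pending() walks ar->txqs, pushes at most 16 frames from each queue, and rotates queues that still have traffic to the tail of the list, which is what the "Prevent aggressive sta/tid taking over tx queue" comment refers to. The array-based sketch below only illustrates the budgeted round-robin; the real code iterates a linked list under txqs_lock and RCU.

/* Budgeted round-robin over pending queues; purely illustrative. */
#include <stdio.h>

#define NQUEUES 3
#define BUDGET  16

static int backlog[NQUEUES] = { 40, 5, 20 };    /* frames waiting per queue */

/* Returns 1 if a frame was sent from queue q, 0 if the queue is empty. */
static int push_one(int q)
{
        if (backlog[q] == 0)
                return 0;
        backlog[q]--;
        return 1;
}

int main(void)
{
        int pass, q, sent;

        for (pass = 0; pass < 10; pass++) {
                for (q = 0; q < NQUEUES; q++) {
                        /* at most BUDGET frames per queue per pass */
                        for (sent = 0; sent < BUDGET; sent++)
                                if (!push_one(q))
                                        break;
                        printf("pass %d queue %d sent %d left %d\n",
                               pass, q, sent, backlog[q]);
                }
                if (backlog[0] + backlog[1] + backlog[2] == 0)
                        break;
        }
        return 0;
}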
3479/************/ 3794/************/
3480/* Scanning */ 3795/* Scanning */
3481/************/ 3796/************/
@@ -3638,66 +3953,86 @@ static int ath10k_start_scan(struct ath10k *ar,
3638/* mac80211 callbacks */ 3953/* mac80211 callbacks */
3639/**********************/ 3954/**********************/
3640 3955
3641static void ath10k_tx(struct ieee80211_hw *hw, 3956static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
3642 struct ieee80211_tx_control *control, 3957 struct ieee80211_tx_control *control,
3643 struct sk_buff *skb) 3958 struct sk_buff *skb)
3644{ 3959{
3645 struct ath10k *ar = hw->priv; 3960 struct ath10k *ar = hw->priv;
3646 struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); 3961 struct ath10k_htt *htt = &ar->htt;
3647 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 3962 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
3648 struct ieee80211_vif *vif = info->control.vif; 3963 struct ieee80211_vif *vif = info->control.vif;
3649 struct ieee80211_sta *sta = control->sta; 3964 struct ieee80211_sta *sta = control->sta;
3650 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 3965 struct ieee80211_txq *txq = NULL;
3966 struct ieee80211_hdr *hdr = (void *)skb->data;
3651 enum ath10k_hw_txrx_mode txmode; 3967 enum ath10k_hw_txrx_mode txmode;
3968 enum ath10k_mac_tx_path txpath;
3969 bool is_htt;
3970 bool is_mgmt;
3971 bool is_presp;
3972 int ret;
3652 3973
3653 /* We should disable CCK RATE due to P2P */ 3974 ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb);
3654 if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
3655 ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
3656 3975
3657 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); 3976 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
3977 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
3978 is_htt = (txpath == ATH10K_MAC_TX_HTT ||
3979 txpath == ATH10K_MAC_TX_HTT_MGMT);
3980 is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
3658 3981
3659 skb_cb->flags = 0; 3982 if (is_htt) {
3660 if (!ath10k_tx_h_use_hwcrypto(vif, skb)) 3983 spin_lock_bh(&ar->htt.tx_lock);
3661 skb_cb->flags |= ATH10K_SKB_F_NO_HWCRYPT; 3984 is_presp = ieee80211_is_probe_resp(hdr->frame_control);
3662 3985
3663 if (ieee80211_is_mgmt(hdr->frame_control)) 3986 ret = ath10k_htt_tx_inc_pending(htt);
3664 skb_cb->flags |= ATH10K_SKB_F_MGMT; 3987 if (ret) {
3665 3988 ath10k_warn(ar, "failed to increase tx pending count: %d, dropping\n",
3666 if (ieee80211_is_data_qos(hdr->frame_control)) 3989 ret);
3667 skb_cb->flags |= ATH10K_SKB_F_QOS; 3990 spin_unlock_bh(&ar->htt.tx_lock);
3668 3991 ieee80211_free_txskb(ar->hw, skb);
3669 skb_cb->vif = vif; 3992 return;
3993 }
3670 3994
3671 switch (txmode) { 3995 ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
3672 case ATH10K_HW_TXRX_MGMT: 3996 if (ret) {
3673 case ATH10K_HW_TXRX_NATIVE_WIFI: 3997 ath10k_dbg(ar, ATH10K_DBG_MAC, "failed to increase tx mgmt pending count: %d, dropping\n",
3674 ath10k_tx_h_nwifi(hw, skb); 3998 ret);
3675 ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb); 3999 ath10k_htt_tx_dec_pending(htt);
3676 ath10k_tx_h_seq_no(vif, skb); 4000 spin_unlock_bh(&ar->htt.tx_lock);
3677 break; 4001 ieee80211_free_txskb(ar->hw, skb);
3678 case ATH10K_HW_TXRX_ETHERNET:
3679 ath10k_tx_h_8023(skb);
3680 break;
3681 case ATH10K_HW_TXRX_RAW:
3682 if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
3683 WARN_ON_ONCE(1);
3684 ieee80211_free_txskb(hw, skb);
3685 return; 4002 return;
3686 } 4003 }
4004 spin_unlock_bh(&ar->htt.tx_lock);
3687 } 4005 }
3688 4006
3689 if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) { 4007 ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
3690 if (!ath10k_mac_tx_frm_has_freq(ar)) { 4008 if (ret) {
3691 ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n", 4009 ath10k_warn(ar, "failed to transmit frame: %d\n", ret);
3692 skb); 4010 if (is_htt) {
3693 4011 spin_lock_bh(&ar->htt.tx_lock);
3694 skb_queue_tail(&ar->offchan_tx_queue, skb); 4012 ath10k_htt_tx_dec_pending(htt);
3695 ieee80211_queue_work(hw, &ar->offchan_tx_work); 4013 if (is_mgmt)
3696 return; 4014 ath10k_htt_tx_mgmt_dec_pending(htt);
4015 spin_unlock_bh(&ar->htt.tx_lock);
3697 } 4016 }
4017 return;
3698 } 4018 }
4019}
4020
4021static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
4022 struct ieee80211_txq *txq)
4023{
4024 struct ath10k *ar = hw->priv;
4025 struct ath10k_txq *artxq = (void *)txq->drv_priv;
4026
4027 spin_lock_bh(&ar->txqs_lock);
4028 if (list_empty(&artxq->list))
4029 list_add_tail(&artxq->list, &ar->txqs);
4030 spin_unlock_bh(&ar->txqs_lock);
4031
4032 if (ath10k_mac_tx_can_push(hw, txq))
4033 tasklet_schedule(&ar->htt.txrx_compl_task);
3699 4034
3700 ath10k_mac_tx(ar, txmode, skb); 4035 ath10k_htt_tx_txq_update(hw, txq);
3701} 4036}
3702 4037
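The new wake_tx_queue op only links the woken queue onto ar->txqs (if it is not already there) and schedules the completion tasklet; actual dequeuing happens later from ath10k_mac_tx_push_pending(). A tiny sketch of that idempotent-enqueue idiom, using an invented flag where the kernel code relies on list_empty():

/* Idempotent enqueue: a queue goes on the pending list only once, no
 * matter how many times it is woken.  Illustrative code only.
 */
#include <stdio.h>

struct node {
        struct node *next;
        int on_list;
};

static struct node *head;

static void wake_queue(struct node *n)
{
        if (n->on_list)         /* already scheduled: nothing to do */
                return;
        n->next = head;
        head = n;
        n->on_list = 1;
        /* real driver: tasklet_schedule() to run the push loop later */
}

int main(void)
{
        struct node q = { 0 };
        struct node *it;
        int count = 0;

        wake_queue(&q);
        wake_queue(&q);         /* second wake is a no-op */

        for (it = head; it; it = it->next)
                count++;
        printf("queued %d time(s)\n", count);   /* prints 1 */
        return 0;
}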
3703/* Must not be called with conf_mutex held as workers can use that also. */ 4038/* Must not be called with conf_mutex held as workers can use that also. */
@@ -3919,14 +4254,14 @@ static void ath10k_mac_setup_ht_vht_cap(struct ath10k *ar)
3919 vht_cap = ath10k_create_vht_cap(ar); 4254 vht_cap = ath10k_create_vht_cap(ar);
3920 4255
3921 if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) { 4256 if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
3922 band = &ar->mac.sbands[IEEE80211_BAND_2GHZ]; 4257 band = &ar->mac.sbands[NL80211_BAND_2GHZ];
3923 band->ht_cap = ht_cap; 4258 band->ht_cap = ht_cap;
3924 4259
3925 /* Enable the VHT support at 2.4 GHz */ 4260 /* Enable the VHT support at 2.4 GHz */
3926 band->vht_cap = vht_cap; 4261 band->vht_cap = vht_cap;
3927 } 4262 }
3928 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) { 4263 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
3929 band = &ar->mac.sbands[IEEE80211_BAND_5GHZ]; 4264 band = &ar->mac.sbands[NL80211_BAND_5GHZ];
3930 band->ht_cap = ht_cap; 4265 band->ht_cap = ht_cap;
3931 band->vht_cap = vht_cap; 4266 band->vht_cap = vht_cap;
3932 } 4267 }
@@ -4100,7 +4435,7 @@ static int ath10k_start(struct ieee80211_hw *hw)
4100 4435
4101 ar->ani_enabled = true; 4436 ar->ani_enabled = true;
4102 4437
4103 if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) { 4438 if (ath10k_peer_stats_enabled(ar)) {
4104 param = ar->wmi.pdev_param->peer_stats_update_period; 4439 param = ar->wmi.pdev_param->peer_stats_update_period;
4105 ret = ath10k_wmi_pdev_set_param(ar, param, 4440 ret = ath10k_wmi_pdev_set_param(ar, param,
4106 PEER_DEFAULT_STATS_UPDATE_PERIOD); 4441 PEER_DEFAULT_STATS_UPDATE_PERIOD);
@@ -4313,6 +4648,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
4313{ 4648{
4314 struct ath10k *ar = hw->priv; 4649 struct ath10k *ar = hw->priv;
4315 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 4650 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
4651 struct ath10k_peer *peer;
4316 enum wmi_sta_powersave_param param; 4652 enum wmi_sta_powersave_param param;
4317 int ret = 0; 4653 int ret = 0;
4318 u32 value; 4654 u32 value;
@@ -4325,6 +4661,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
4325 mutex_lock(&ar->conf_mutex); 4661 mutex_lock(&ar->conf_mutex);
4326 4662
4327 memset(arvif, 0, sizeof(*arvif)); 4663 memset(arvif, 0, sizeof(*arvif));
4664 ath10k_mac_txq_init(vif->txq);
4328 4665
4329 arvif->ar = ar; 4666 arvif->ar = ar;
4330 arvif->vif = vif; 4667 arvif->vif = vif;
@@ -4489,7 +4826,10 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
4489 goto err_vdev_delete; 4826 goto err_vdev_delete;
4490 } 4827 }
4491 4828
 4492 if (ar->cfg_tx_chainmask) { 4829 /* Configuring the number of spatial streams for a monitor interface causes
 4830 * a target assert in qca9888 and qca6174.
4831 */
4832 if (ar->cfg_tx_chainmask && (vif->type != NL80211_IFTYPE_MONITOR)) {
4493 u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask); 4833 u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask);
4494 4834
4495 vdev_param = ar->wmi.vdev_param->nss; 4835 vdev_param = ar->wmi.vdev_param->nss;
@@ -4505,13 +4845,31 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
4505 4845
4506 if (arvif->vdev_type == WMI_VDEV_TYPE_AP || 4846 if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
4507 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { 4847 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
4508 ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr, 4848 ret = ath10k_peer_create(ar, vif, NULL, arvif->vdev_id,
4509 WMI_PEER_TYPE_DEFAULT); 4849 vif->addr, WMI_PEER_TYPE_DEFAULT);
4510 if (ret) { 4850 if (ret) {
4511 ath10k_warn(ar, "failed to create vdev %i peer for AP/IBSS: %d\n", 4851 ath10k_warn(ar, "failed to create vdev %i peer for AP/IBSS: %d\n",
4512 arvif->vdev_id, ret); 4852 arvif->vdev_id, ret);
4513 goto err_vdev_delete; 4853 goto err_vdev_delete;
4514 } 4854 }
4855
4856 spin_lock_bh(&ar->data_lock);
4857
4858 peer = ath10k_peer_find(ar, arvif->vdev_id, vif->addr);
4859 if (!peer) {
4860 ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
4861 vif->addr, arvif->vdev_id);
4862 spin_unlock_bh(&ar->data_lock);
4863 ret = -ENOENT;
4864 goto err_peer_delete;
4865 }
4866
4867 arvif->peer_id = find_first_bit(peer->peer_ids,
4868 ATH10K_MAX_NUM_PEER_IDS);
4869
4870 spin_unlock_bh(&ar->data_lock);
4871 } else {
4872 arvif->peer_id = HTT_INVALID_PEERID;
4515 } 4873 }
4516 4874
4517 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { 4875 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
@@ -4622,7 +4980,9 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
4622{ 4980{
4623 struct ath10k *ar = hw->priv; 4981 struct ath10k *ar = hw->priv;
4624 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 4982 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
4983 struct ath10k_peer *peer;
4625 int ret; 4984 int ret;
4985 int i;
4626 4986
4627 cancel_work_sync(&arvif->ap_csa_work); 4987 cancel_work_sync(&arvif->ap_csa_work);
4628 cancel_delayed_work_sync(&arvif->connection_loss_work); 4988 cancel_delayed_work_sync(&arvif->connection_loss_work);
@@ -4676,7 +5036,22 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
4676 spin_unlock_bh(&ar->data_lock); 5036 spin_unlock_bh(&ar->data_lock);
4677 } 5037 }
4678 5038
5039 spin_lock_bh(&ar->data_lock);
5040 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
5041 peer = ar->peer_map[i];
5042 if (!peer)
5043 continue;
5044
5045 if (peer->vif == vif) {
5046 ath10k_warn(ar, "found vif peer %pM entry on vdev %i after it was supposedly removed\n",
5047 vif->addr, arvif->vdev_id);
5048 peer->vif = NULL;
5049 }
5050 }
5051 spin_unlock_bh(&ar->data_lock);
5052
4679 ath10k_peer_cleanup(ar, arvif->vdev_id); 5053 ath10k_peer_cleanup(ar, arvif->vdev_id);
5054 ath10k_mac_txq_unref(ar, vif->txq);
4680 5055
4681 if (vif->type == NL80211_IFTYPE_MONITOR) { 5056 if (vif->type == NL80211_IFTYPE_MONITOR) {
4682 ar->monitor_arvif = NULL; 5057 ar->monitor_arvif = NULL;
@@ -4689,6 +5064,8 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
4689 ath10k_mac_vif_tx_unlock_all(arvif); 5064 ath10k_mac_vif_tx_unlock_all(arvif);
4690 spin_unlock_bh(&ar->htt.tx_lock); 5065 spin_unlock_bh(&ar->htt.tx_lock);
4691 5066
5067 ath10k_mac_txq_unref(ar, vif->txq);
5068
4692 mutex_unlock(&ar->conf_mutex); 5069 mutex_unlock(&ar->conf_mutex);
4693} 5070}
4694 5071
@@ -5218,7 +5595,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
5218 struct ath10k_sta *arsta; 5595 struct ath10k_sta *arsta;
5219 struct ieee80211_sta *sta; 5596 struct ieee80211_sta *sta;
5220 struct cfg80211_chan_def def; 5597 struct cfg80211_chan_def def;
5221 enum ieee80211_band band; 5598 enum nl80211_band band;
5222 const u8 *ht_mcs_mask; 5599 const u8 *ht_mcs_mask;
5223 const u16 *vht_mcs_mask; 5600 const u16 *vht_mcs_mask;
5224 u32 changed, bw, nss, smps; 5601 u32 changed, bw, nss, smps;
@@ -5393,13 +5770,18 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
5393 struct ath10k *ar = hw->priv; 5770 struct ath10k *ar = hw->priv;
5394 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 5771 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5395 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 5772 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
5773 struct ath10k_peer *peer;
5396 int ret = 0; 5774 int ret = 0;
5775 int i;
5397 5776
5398 if (old_state == IEEE80211_STA_NOTEXIST && 5777 if (old_state == IEEE80211_STA_NOTEXIST &&
5399 new_state == IEEE80211_STA_NONE) { 5778 new_state == IEEE80211_STA_NONE) {
5400 memset(arsta, 0, sizeof(*arsta)); 5779 memset(arsta, 0, sizeof(*arsta));
5401 arsta->arvif = arvif; 5780 arsta->arvif = arvif;
5402 INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk); 5781 INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
5782
5783 for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
5784 ath10k_mac_txq_init(sta->txq[i]);
5403 } 5785 }
5404 5786
5405 /* cancel must be done outside the mutex to avoid deadlock */ 5787 /* cancel must be done outside the mutex to avoid deadlock */
@@ -5434,8 +5816,8 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
5434 if (sta->tdls) 5816 if (sta->tdls)
5435 peer_type = WMI_PEER_TYPE_TDLS; 5817 peer_type = WMI_PEER_TYPE_TDLS;
5436 5818
5437 ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr, 5819 ret = ath10k_peer_create(ar, vif, sta, arvif->vdev_id,
5438 peer_type); 5820 sta->addr, peer_type);
5439 if (ret) { 5821 if (ret) {
5440 ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n", 5822 ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n",
5441 sta->addr, arvif->vdev_id, ret); 5823 sta->addr, arvif->vdev_id, ret);
@@ -5443,6 +5825,24 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
5443 goto exit; 5825 goto exit;
5444 } 5826 }
5445 5827
5828 spin_lock_bh(&ar->data_lock);
5829
5830 peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr);
5831 if (!peer) {
5832 ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
5833 vif->addr, arvif->vdev_id);
5834 spin_unlock_bh(&ar->data_lock);
5835 ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
5836 ath10k_mac_dec_num_stations(arvif, sta);
5837 ret = -ENOENT;
5838 goto exit;
5839 }
5840
5841 arsta->peer_id = find_first_bit(peer->peer_ids,
5842 ATH10K_MAX_NUM_PEER_IDS);
5843
5844 spin_unlock_bh(&ar->data_lock);
5845
5446 if (!sta->tdls) 5846 if (!sta->tdls)
5447 goto exit; 5847 goto exit;
5448 5848
@@ -5505,6 +5905,23 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
5505 5905
5506 ath10k_mac_dec_num_stations(arvif, sta); 5906 ath10k_mac_dec_num_stations(arvif, sta);
5507 5907
5908 spin_lock_bh(&ar->data_lock);
5909 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
5910 peer = ar->peer_map[i];
5911 if (!peer)
5912 continue;
5913
5914 if (peer->sta == sta) {
5915 ath10k_warn(ar, "found sta peer %pM entry on vdev %i after it was supposedly removed\n",
5916 sta->addr, arvif->vdev_id);
5917 peer->sta = NULL;
5918 }
5919 }
5920 spin_unlock_bh(&ar->data_lock);
5921
5922 for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
5923 ath10k_mac_txq_unref(ar, sta->txq[i]);
5924
5508 if (!sta->tdls) 5925 if (!sta->tdls)
5509 goto exit; 5926 goto exit;
5510 5927
@@ -5977,14 +6394,14 @@ static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
5977 6394
5978 mutex_lock(&ar->conf_mutex); 6395 mutex_lock(&ar->conf_mutex);
5979 6396
5980 sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ]; 6397 sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
5981 if (sband && idx >= sband->n_channels) { 6398 if (sband && idx >= sband->n_channels) {
5982 idx -= sband->n_channels; 6399 idx -= sband->n_channels;
5983 sband = NULL; 6400 sband = NULL;
5984 } 6401 }
5985 6402
5986 if (!sband) 6403 if (!sband)
5987 sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ]; 6404 sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
5988 6405
5989 if (!sband || idx >= sband->n_channels) { 6406 if (!sband || idx >= sband->n_channels) {
5990 ret = -ENOENT; 6407 ret = -ENOENT;
@@ -6007,7 +6424,7 @@ exit:
6007 6424
6008static bool 6425static bool
6009ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar, 6426ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar,
6010 enum ieee80211_band band, 6427 enum nl80211_band band,
6011 const struct cfg80211_bitrate_mask *mask) 6428 const struct cfg80211_bitrate_mask *mask)
6012{ 6429{
6013 int num_rates = 0; 6430 int num_rates = 0;
@@ -6026,7 +6443,7 @@ ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar,
6026 6443
6027static bool 6444static bool
6028ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar, 6445ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar,
6029 enum ieee80211_band band, 6446 enum nl80211_band band,
6030 const struct cfg80211_bitrate_mask *mask, 6447 const struct cfg80211_bitrate_mask *mask,
6031 int *nss) 6448 int *nss)
6032{ 6449{
@@ -6075,7 +6492,7 @@ ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar,
6075 6492
6076static int 6493static int
6077ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar, 6494ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar,
6078 enum ieee80211_band band, 6495 enum nl80211_band band,
6079 const struct cfg80211_bitrate_mask *mask, 6496 const struct cfg80211_bitrate_mask *mask,
6080 u8 *rate, u8 *nss) 6497 u8 *rate, u8 *nss)
6081{ 6498{
@@ -6176,7 +6593,7 @@ static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif,
6176 6593
6177static bool 6594static bool
6178ath10k_mac_can_set_bitrate_mask(struct ath10k *ar, 6595ath10k_mac_can_set_bitrate_mask(struct ath10k *ar,
6179 enum ieee80211_band band, 6596 enum nl80211_band band,
6180 const struct cfg80211_bitrate_mask *mask) 6597 const struct cfg80211_bitrate_mask *mask)
6181{ 6598{
6182 int i; 6599 int i;
@@ -6228,7 +6645,7 @@ static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
6228 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 6645 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
6229 struct cfg80211_chan_def def; 6646 struct cfg80211_chan_def def;
6230 struct ath10k *ar = arvif->ar; 6647 struct ath10k *ar = arvif->ar;
6231 enum ieee80211_band band; 6648 enum nl80211_band band;
6232 const u8 *ht_mcs_mask; 6649 const u8 *ht_mcs_mask;
6233 const u16 *vht_mcs_mask; 6650 const u16 *vht_mcs_mask;
6234 u8 rate; 6651 u8 rate;
@@ -6807,7 +7224,8 @@ ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
6807} 7224}
6808 7225
6809static const struct ieee80211_ops ath10k_ops = { 7226static const struct ieee80211_ops ath10k_ops = {
6810 .tx = ath10k_tx, 7227 .tx = ath10k_mac_op_tx,
7228 .wake_tx_queue = ath10k_mac_op_wake_tx_queue,
6811 .start = ath10k_start, 7229 .start = ath10k_start,
6812 .stop = ath10k_stop, 7230 .stop = ath10k_stop,
6813 .config = ath10k_config, 7231 .config = ath10k_config,
@@ -6857,7 +7275,7 @@ static const struct ieee80211_ops ath10k_ops = {
6857}; 7275};
6858 7276
6859#define CHAN2G(_channel, _freq, _flags) { \ 7277#define CHAN2G(_channel, _freq, _flags) { \
6860 .band = IEEE80211_BAND_2GHZ, \ 7278 .band = NL80211_BAND_2GHZ, \
6861 .hw_value = (_channel), \ 7279 .hw_value = (_channel), \
6862 .center_freq = (_freq), \ 7280 .center_freq = (_freq), \
6863 .flags = (_flags), \ 7281 .flags = (_flags), \
@@ -6866,7 +7284,7 @@ static const struct ieee80211_ops ath10k_ops = {
6866} 7284}
6867 7285
6868#define CHAN5G(_channel, _freq, _flags) { \ 7286#define CHAN5G(_channel, _freq, _flags) { \
6869 .band = IEEE80211_BAND_5GHZ, \ 7287 .band = NL80211_BAND_5GHZ, \
6870 .hw_value = (_channel), \ 7288 .hw_value = (_channel), \
6871 .center_freq = (_freq), \ 7289 .center_freq = (_freq), \
6872 .flags = (_flags), \ 7290 .flags = (_flags), \
@@ -7186,13 +7604,13 @@ int ath10k_mac_register(struct ath10k *ar)
7186 goto err_free; 7604 goto err_free;
7187 } 7605 }
7188 7606
7189 band = &ar->mac.sbands[IEEE80211_BAND_2GHZ]; 7607 band = &ar->mac.sbands[NL80211_BAND_2GHZ];
7190 band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels); 7608 band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
7191 band->channels = channels; 7609 band->channels = channels;
7192 band->n_bitrates = ath10k_g_rates_size; 7610 band->n_bitrates = ath10k_g_rates_size;
7193 band->bitrates = ath10k_g_rates; 7611 band->bitrates = ath10k_g_rates;
7194 7612
7195 ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band; 7613 ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
7196 } 7614 }
7197 7615
7198 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) { 7616 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
@@ -7204,12 +7622,12 @@ int ath10k_mac_register(struct ath10k *ar)
7204 goto err_free; 7622 goto err_free;
7205 } 7623 }
7206 7624
7207 band = &ar->mac.sbands[IEEE80211_BAND_5GHZ]; 7625 band = &ar->mac.sbands[NL80211_BAND_5GHZ];
7208 band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels); 7626 band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels);
7209 band->channels = channels; 7627 band->channels = channels;
7210 band->n_bitrates = ath10k_a_rates_size; 7628 band->n_bitrates = ath10k_a_rates_size;
7211 band->bitrates = ath10k_a_rates; 7629 band->bitrates = ath10k_a_rates;
7212 ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = band; 7630 ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
7213 } 7631 }
7214 7632
7215 ath10k_mac_setup_ht_vht_cap(ar); 7633 ath10k_mac_setup_ht_vht_cap(ar);
@@ -7262,6 +7680,7 @@ int ath10k_mac_register(struct ath10k *ar)
7262 7680
7263 ar->hw->vif_data_size = sizeof(struct ath10k_vif); 7681 ar->hw->vif_data_size = sizeof(struct ath10k_vif);
7264 ar->hw->sta_data_size = sizeof(struct ath10k_sta); 7682 ar->hw->sta_data_size = sizeof(struct ath10k_sta);
7683 ar->hw->txq_data_size = sizeof(struct ath10k_txq);
7265 7684
7266 ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL; 7685 ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
7267 7686
@@ -7286,7 +7705,8 @@ int ath10k_mac_register(struct ath10k *ar)
7286 ar->hw->wiphy->max_remain_on_channel_duration = 5000; 7705 ar->hw->wiphy->max_remain_on_channel_duration = 5000;
7287 7706
7288 ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; 7707 ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
7289 ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE; 7708 ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
7709 NL80211_FEATURE_AP_SCAN;
7290 7710
7291 ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations; 7711 ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;
7292 7712
@@ -7395,8 +7815,8 @@ err_dfs_detector_exit:
7395 ar->dfs_detector->exit(ar->dfs_detector); 7815 ar->dfs_detector->exit(ar->dfs_detector);
7396 7816
7397err_free: 7817err_free:
7398 kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels); 7818 kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
7399 kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels); 7819 kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
7400 7820
7401 SET_IEEE80211_DEV(ar->hw, NULL); 7821 SET_IEEE80211_DEV(ar->hw, NULL);
7402 return ret; 7822 return ret;
@@ -7409,8 +7829,8 @@ void ath10k_mac_unregister(struct ath10k *ar)
7409 if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) 7829 if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
7410 ar->dfs_detector->exit(ar->dfs_detector); 7830 ar->dfs_detector->exit(ar->dfs_detector);
7411 7831
7412 kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels); 7832 kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
7413 kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels); 7833 kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
7414 7834
7415 SET_IEEE80211_DEV(ar->hw, NULL); 7835 SET_IEEE80211_DEV(ar->hw, NULL);
7416} 7836}
diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h
index 53091588090d..2c3327beb445 100644
--- a/drivers/net/wireless/ath/ath10k/mac.h
+++ b/drivers/net/wireless/ath/ath10k/mac.h
@@ -75,6 +75,12 @@ void ath10k_mac_tx_unlock(struct ath10k *ar, int reason);
75void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason); 75void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason);
76void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason); 76void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason);
77bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar); 77bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar);
78void ath10k_mac_tx_push_pending(struct ath10k *ar);
79int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
80 struct ieee80211_txq *txq);
81struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
82 u16 peer_id,
83 u8 tid);
78 84
79static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif) 85static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
80{ 86{
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index b3cff1d3364a..0b305efe6c94 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -809,7 +809,8 @@ static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
809 spin_lock_bh(&ar_pci->ce_lock); 809 spin_lock_bh(&ar_pci->ce_lock);
810 num = __ath10k_ce_rx_num_free_bufs(ce_pipe); 810 num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
811 spin_unlock_bh(&ar_pci->ce_lock); 811 spin_unlock_bh(&ar_pci->ce_lock);
812 while (num--) { 812
813 while (num >= 0) {
813 ret = __ath10k_pci_rx_post_buf(pipe); 814 ret = __ath10k_pci_rx_post_buf(pipe);
814 if (ret) { 815 if (ret) {
815 if (ret == -ENOSPC) 816 if (ret == -ENOSPC)
@@ -819,6 +820,7 @@ static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
819 ATH10K_PCI_RX_POST_RETRY_MS); 820 ATH10K_PCI_RX_POST_RETRY_MS);
820 break; 821 break;
821 } 822 }
823 num--;
822 } 824 }
823} 825}
824 826
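The rx_post_pipe hunk above swaps "while (num--)" for "while (num >= 0)" with the decrement moved inside the loop body, which runs one more iteration for the same starting value (and skips the decrement when posting fails and the loop breaks early). The snippet below only demonstrates the iteration-count difference; it is not driver code.

/* Iteration-count comparison only. */
#include <stdio.h>

int main(void)
{
        int num, count;

        num = 3;
        count = 0;
        while (num--)                   /* old style */
                count++;
        printf("while (num--)      : %d iterations\n", count);  /* 3 */

        num = 3;
        count = 0;
        while (num >= 0) {              /* new style */
                count++;
                num--;
        }
        printf("while (num >= 0)...: %d iterations\n", count);  /* 4 */
        return 0;
}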
@@ -870,10 +872,8 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
870{ 872{
871 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 873 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
872 int ret = 0; 874 int ret = 0;
873 u32 buf; 875 u32 *buf;
874 unsigned int completed_nbytes, orig_nbytes, remaining_bytes; 876 unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
875 unsigned int id;
876 unsigned int flags;
877 struct ath10k_ce_pipe *ce_diag; 877 struct ath10k_ce_pipe *ce_diag;
878 /* Host buffer address in CE space */ 878 /* Host buffer address in CE space */
879 u32 ce_data; 879 u32 ce_data;
@@ -909,7 +909,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
909 nbytes = min_t(unsigned int, remaining_bytes, 909 nbytes = min_t(unsigned int, remaining_bytes,
910 DIAG_TRANSFER_LIMIT); 910 DIAG_TRANSFER_LIMIT);
911 911
912 ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data); 912 ret = __ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data);
913 if (ret != 0) 913 if (ret != 0)
914 goto done; 914 goto done;
915 915
@@ -940,9 +940,10 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
940 } 940 }
941 941
942 i = 0; 942 i = 0;
943 while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf, 943 while (ath10k_ce_completed_recv_next_nolock(ce_diag,
944 &completed_nbytes, 944 (void **)&buf,
945 &id, &flags) != 0) { 945 &completed_nbytes)
946 != 0) {
946 mdelay(1); 947 mdelay(1);
947 948
948 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { 949 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
@@ -956,7 +957,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
956 goto done; 957 goto done;
957 } 958 }
958 959
959 if (buf != ce_data) { 960 if (*buf != ce_data) {
960 ret = -EIO; 961 ret = -EIO;
961 goto done; 962 goto done;
962 } 963 }
@@ -1026,10 +1027,8 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
1026{ 1027{
1027 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1028 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1028 int ret = 0; 1029 int ret = 0;
1029 u32 buf; 1030 u32 *buf;
1030 unsigned int completed_nbytes, orig_nbytes, remaining_bytes; 1031 unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
1031 unsigned int id;
1032 unsigned int flags;
1033 struct ath10k_ce_pipe *ce_diag; 1032 struct ath10k_ce_pipe *ce_diag;
1034 void *data_buf = NULL; 1033 void *data_buf = NULL;
1035 u32 ce_data; /* Host buffer address in CE space */ 1034 u32 ce_data; /* Host buffer address in CE space */
@@ -1078,7 +1077,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
1078 nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT); 1077 nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
1079 1078
1080 /* Set up to receive directly into Target(!) address */ 1079 /* Set up to receive directly into Target(!) address */
1081 ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, address); 1080 ret = __ath10k_ce_rx_post_buf(ce_diag, &address, address);
1082 if (ret != 0) 1081 if (ret != 0)
1083 goto done; 1082 goto done;
1084 1083
@@ -1103,9 +1102,10 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
1103 } 1102 }
1104 1103
1105 i = 0; 1104 i = 0;
1106 while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf, 1105 while (ath10k_ce_completed_recv_next_nolock(ce_diag,
1107 &completed_nbytes, 1106 (void **)&buf,
1108 &id, &flags) != 0) { 1107 &completed_nbytes)
1108 != 0) {
1109 mdelay(1); 1109 mdelay(1);
1110 1110
1111 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { 1111 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
@@ -1119,7 +1119,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
1119 goto done; 1119 goto done;
1120 } 1120 }
1121 1121
1122 if (buf != address) { 1122 if (*buf != address) {
1123 ret = -EIO; 1123 ret = -EIO;
1124 goto done; 1124 goto done;
1125 } 1125 }
@@ -1181,15 +1181,11 @@ static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
1181 struct sk_buff *skb; 1181 struct sk_buff *skb;
1182 struct sk_buff_head list; 1182 struct sk_buff_head list;
1183 void *transfer_context; 1183 void *transfer_context;
1184 u32 ce_data;
1185 unsigned int nbytes, max_nbytes; 1184 unsigned int nbytes, max_nbytes;
1186 unsigned int transfer_id;
1187 unsigned int flags;
1188 1185
1189 __skb_queue_head_init(&list); 1186 __skb_queue_head_init(&list);
1190 while (ath10k_ce_completed_recv_next(ce_state, &transfer_context, 1187 while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
1191 &ce_data, &nbytes, &transfer_id, 1188 &nbytes) == 0) {
1192 &flags) == 0) {
1193 skb = transfer_context; 1189 skb = transfer_context;
1194 max_nbytes = skb->len + skb_tailroom(skb); 1190 max_nbytes = skb->len + skb_tailroom(skb);
1195 dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, 1191 dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
@@ -1218,6 +1214,63 @@ static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
1218 ath10k_pci_rx_post_pipe(pipe_info); 1214 ath10k_pci_rx_post_pipe(pipe_info);
1219} 1215}
1220 1216
1217static void ath10k_pci_process_htt_rx_cb(struct ath10k_ce_pipe *ce_state,
1218 void (*callback)(struct ath10k *ar,
1219 struct sk_buff *skb))
1220{
1221 struct ath10k *ar = ce_state->ar;
1222 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1223 struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
1224 struct ath10k_ce_pipe *ce_pipe = pipe_info->ce_hdl;
1225 struct sk_buff *skb;
1226 struct sk_buff_head list;
1227 void *transfer_context;
1228 unsigned int nbytes, max_nbytes, nentries;
1229 int orig_len;
1230
 1231 /* No need to acquire ce_lock for CE5, since this is the only place CE5
1232 * is processed other than init and deinit. Before releasing CE5
1233 * buffers, interrupts are disabled. Thus CE5 access is serialized.
1234 */
1235 __skb_queue_head_init(&list);
1236 while (ath10k_ce_completed_recv_next_nolock(ce_state, &transfer_context,
1237 &nbytes) == 0) {
1238 skb = transfer_context;
1239 max_nbytes = skb->len + skb_tailroom(skb);
1240
1241 if (unlikely(max_nbytes < nbytes)) {
1242 ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
1243 nbytes, max_nbytes);
1244 continue;
1245 }
1246
1247 dma_sync_single_for_cpu(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1248 max_nbytes, DMA_FROM_DEVICE);
1249 skb_put(skb, nbytes);
1250 __skb_queue_tail(&list, skb);
1251 }
1252
1253 nentries = skb_queue_len(&list);
1254 while ((skb = __skb_dequeue(&list))) {
1255 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
1256 ce_state->id, skb->len);
1257 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
1258 skb->data, skb->len);
1259
1260 orig_len = skb->len;
1261 callback(ar, skb);
1262 skb_push(skb, orig_len - skb->len);
1263 skb_reset_tail_pointer(skb);
1264 skb_trim(skb, 0);
1265
 1266 /* let device gain the buffer again */
1267 dma_sync_single_for_device(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1268 skb->len + skb_tailroom(skb),
1269 DMA_FROM_DEVICE);
1270 }
1271 ath10k_ce_rx_update_write_idx(ce_pipe, nentries);
1272}
1273
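The new ath10k_pci_process_htt_rx_cb() avoids the unmap/allocate/map cycle of the generic rx path: it syncs each buffer for the CPU, delivers it, then rewinds the skb and syncs it back for the device so the very same buffer can be reposted, updating the CE write index once per batch. The sketch below shows the borrow/process/rewind/give-back idea with plain buffers and no DMA API; the names are illustrative.

/* Recycle-in-place receive buffer sketch; not ath10k code. */
#include <stdio.h>
#include <string.h>

#define BUF_SZ 64

struct rxbuf {
        unsigned char data[BUF_SZ];
        size_t len;             /* bytes currently valid */
};

static void deliver(struct rxbuf *b)
{
        /* the consumer may trim or copy the data; here it just prints it */
        printf("rx %zu bytes: %s\n", b->len, (const char *)b->data);
}

static void process_and_recycle(struct rxbuf *b, size_t nbytes)
{
        /* "dma_sync_single_for_cpu" would go here in the real driver */
        b->len = nbytes;

        deliver(b);

        /* rewind so the device sees a full, empty buffer again */
        b->len = 0;
        memset(b->data, 0, BUF_SZ);
        /* "dma_sync_single_for_device" + write-index update would follow */
}

int main(void)
{
        struct rxbuf b = { .len = 0 };

        memcpy(b.data, "hello", 6);     /* pretend the device wrote this */
        process_and_recycle(&b, 6);
        printf("buffer ready again, len=%zu\n", b.len);
        return 0;
}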
1221/* Called by lower (CE) layer when data is received from the Target. */ 1274/* Called by lower (CE) layer when data is received from the Target. */
1222static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state) 1275static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
1223{ 1276{
@@ -1274,7 +1327,7 @@ static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
1274 */ 1327 */
1275 ath10k_ce_per_engine_service(ce_state->ar, 4); 1328 ath10k_ce_per_engine_service(ce_state->ar, 4);
1276 1329
1277 ath10k_pci_process_rx_cb(ce_state, ath10k_pci_htt_rx_deliver); 1330 ath10k_pci_process_htt_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
1278} 1331}
1279 1332
1280int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, 1333int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
@@ -1835,13 +1888,10 @@ static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
1835{ 1888{
1836 struct ath10k *ar = ce_state->ar; 1889 struct ath10k *ar = ce_state->ar;
1837 struct bmi_xfer *xfer; 1890 struct bmi_xfer *xfer;
1838 u32 ce_data;
1839 unsigned int nbytes; 1891 unsigned int nbytes;
1840 unsigned int transfer_id;
1841 unsigned int flags;
1842 1892
1843 if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data, 1893 if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer,
1844 &nbytes, &transfer_id, &flags)) 1894 &nbytes))
1845 return; 1895 return;
1846 1896
1847 if (WARN_ON_ONCE(!xfer)) 1897 if (WARN_ON_ONCE(!xfer))
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index fbfb608e48ab..9369411a9ac0 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -49,25 +49,25 @@ out:
49 spin_unlock_bh(&ar->data_lock); 49 spin_unlock_bh(&ar->data_lock);
50} 50}
51 51
52void ath10k_txrx_tx_unref(struct ath10k_htt *htt, 52int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
53 const struct htt_tx_done *tx_done) 53 const struct htt_tx_done *tx_done)
54{ 54{
55 struct ath10k *ar = htt->ar; 55 struct ath10k *ar = htt->ar;
56 struct device *dev = ar->dev; 56 struct device *dev = ar->dev;
57 struct ieee80211_tx_info *info; 57 struct ieee80211_tx_info *info;
58 struct ieee80211_txq *txq;
58 struct ath10k_skb_cb *skb_cb; 59 struct ath10k_skb_cb *skb_cb;
60 struct ath10k_txq *artxq;
59 struct sk_buff *msdu; 61 struct sk_buff *msdu;
60 bool limit_mgmt_desc = false;
61 62
62 ath10k_dbg(ar, ATH10K_DBG_HTT, 63 ath10k_dbg(ar, ATH10K_DBG_HTT,
63 "htt tx completion msdu_id %u discard %d no_ack %d success %d\n", 64 "htt tx completion msdu_id %u status %d\n",
64 tx_done->msdu_id, !!tx_done->discard, 65 tx_done->msdu_id, tx_done->status);
65 !!tx_done->no_ack, !!tx_done->success);
66 66
67 if (tx_done->msdu_id >= htt->max_num_pending_tx) { 67 if (tx_done->msdu_id >= htt->max_num_pending_tx) {
68 ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n", 68 ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n",
69 tx_done->msdu_id); 69 tx_done->msdu_id);
70 return; 70 return -EINVAL;
71 } 71 }
72 72
73 spin_lock_bh(&htt->tx_lock); 73 spin_lock_bh(&htt->tx_lock);
@@ -76,17 +76,18 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
76 ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n", 76 ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n",
77 tx_done->msdu_id); 77 tx_done->msdu_id);
78 spin_unlock_bh(&htt->tx_lock); 78 spin_unlock_bh(&htt->tx_lock);
79 return; 79 return -ENOENT;
80 } 80 }
81 81
82 skb_cb = ATH10K_SKB_CB(msdu); 82 skb_cb = ATH10K_SKB_CB(msdu);
83 txq = skb_cb->txq;
84 artxq = (void *)txq->drv_priv;
83 85
84 if (unlikely(skb_cb->flags & ATH10K_SKB_F_MGMT) && 86 if (txq)
85 ar->hw_params.max_probe_resp_desc_thres) 87 artxq->num_fw_queued--;
86 limit_mgmt_desc = true;
87 88
88 ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id); 89 ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
89 __ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc); 90 ath10k_htt_tx_dec_pending(htt);
90 if (htt->num_pending_tx == 0) 91 if (htt->num_pending_tx == 0)
91 wake_up(&htt->empty_tx_wq); 92 wake_up(&htt->empty_tx_wq);
92 spin_unlock_bh(&htt->tx_lock); 93 spin_unlock_bh(&htt->tx_lock);
@@ -99,22 +100,24 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
99 memset(&info->status, 0, sizeof(info->status)); 100 memset(&info->status, 0, sizeof(info->status));
100 trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id); 101 trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id);
101 102
102 if (tx_done->discard) { 103 if (tx_done->status == HTT_TX_COMPL_STATE_DISCARD) {
103 ieee80211_free_txskb(htt->ar->hw, msdu); 104 ieee80211_free_txskb(htt->ar->hw, msdu);
104 return; 105 return 0;
105 } 106 }
106 107
107 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) 108 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
108 info->flags |= IEEE80211_TX_STAT_ACK; 109 info->flags |= IEEE80211_TX_STAT_ACK;
109 110
110 if (tx_done->no_ack) 111 if (tx_done->status == HTT_TX_COMPL_STATE_NOACK)
111 info->flags &= ~IEEE80211_TX_STAT_ACK; 112 info->flags &= ~IEEE80211_TX_STAT_ACK;
112 113
113 if (tx_done->success && (info->flags & IEEE80211_TX_CTL_NO_ACK)) 114 if ((tx_done->status == HTT_TX_COMPL_STATE_ACK) &&
115 (info->flags & IEEE80211_TX_CTL_NO_ACK))
114 info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; 116 info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
115 117
116 ieee80211_tx_status(htt->ar->hw, msdu); 118 ieee80211_tx_status(htt->ar->hw, msdu);
117 /* we do not own the msdu anymore */ 119 /* we do not own the msdu anymore */
120 return 0;
118} 121}
119 122
120struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id, 123struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
@@ -203,6 +206,7 @@ void ath10k_peer_map_event(struct ath10k_htt *htt,
203 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n", 206 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n",
204 ev->vdev_id, ev->addr, ev->peer_id); 207 ev->vdev_id, ev->addr, ev->peer_id);
205 208
209 ar->peer_map[ev->peer_id] = peer;
206 set_bit(ev->peer_id, peer->peer_ids); 210 set_bit(ev->peer_id, peer->peer_ids);
207exit: 211exit:
208 spin_unlock_bh(&ar->data_lock); 212 spin_unlock_bh(&ar->data_lock);
@@ -225,6 +229,7 @@ void ath10k_peer_unmap_event(struct ath10k_htt *htt,
225 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n", 229 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
226 peer->vdev_id, peer->addr, ev->peer_id); 230 peer->vdev_id, peer->addr, ev->peer_id);
227 231
232 ar->peer_map[ev->peer_id] = NULL;
228 clear_bit(ev->peer_id, peer->peer_ids); 233 clear_bit(ev->peer_id, peer->peer_ids);
229 234
230 if (bitmap_empty(peer->peer_ids, ATH10K_MAX_NUM_PEER_IDS)) { 235 if (bitmap_empty(peer->peer_ids, ATH10K_MAX_NUM_PEER_IDS)) {
diff --git a/drivers/net/wireless/ath/ath10k/txrx.h b/drivers/net/wireless/ath/ath10k/txrx.h
index a90e09f5c7f2..e7ea1ae1c438 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.h
+++ b/drivers/net/wireless/ath/ath10k/txrx.h
@@ -19,8 +19,8 @@
19 19
20#include "htt.h" 20#include "htt.h"
21 21
22void ath10k_txrx_tx_unref(struct ath10k_htt *htt, 22int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
23 const struct htt_tx_done *tx_done); 23 const struct htt_tx_done *tx_done);
24 24
25struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id, 25struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
26 const u8 *addr); 26 const u8 *addr);
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index 32ab34edceb5..7fb00dcc03b8 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -186,6 +186,9 @@ struct wmi_ops {
186 u8 enable, 186 u8 enable,
187 u32 detect_level, 187 u32 detect_level,
188 u32 detect_margin); 188 u32 detect_margin);
189 struct sk_buff *(*ext_resource_config)(struct ath10k *ar,
190 enum wmi_host_platform_type type,
191 u32 fw_feature_bitmap);
189 int (*get_vdev_subtype)(struct ath10k *ar, 192 int (*get_vdev_subtype)(struct ath10k *ar,
190 enum wmi_vdev_subtype subtype); 193 enum wmi_vdev_subtype subtype);
191}; 194};
@@ -1330,6 +1333,26 @@ ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
1330} 1333}
1331 1334
1332static inline int 1335static inline int
1336ath10k_wmi_ext_resource_config(struct ath10k *ar,
1337 enum wmi_host_platform_type type,
1338 u32 fw_feature_bitmap)
1339{
1340 struct sk_buff *skb;
1341
1342 if (!ar->wmi.ops->ext_resource_config)
1343 return -EOPNOTSUPP;
1344
1345 skb = ar->wmi.ops->ext_resource_config(ar, type,
1346 fw_feature_bitmap);
1347
1348 if (IS_ERR(skb))
1349 return PTR_ERR(skb);
1350
1351 return ath10k_wmi_cmd_send(ar, skb,
1352 ar->wmi.cmd->ext_resource_cfg_cmdid);
1353}
1354
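ath10k_wmi_ext_resource_config() follows the usual wmi-ops wrapper shape: bail out with -EOPNOTSUPP when the firmware flavour does not provide the op, otherwise let the op build the command buffer and hand it to the generic send with the mapped command id. A minimal sketch of that optional-op-table pattern; the structures and names are invented and the skb is reduced to a plain struct.

/* Optional-op dispatch sketch, loosely modelled on the wmi-ops wrappers. */
#include <errno.h>
#include <stdio.h>

struct cmd { int id; int arg; };

struct ops {
        /* may be NULL when a firmware flavour lacks the command */
        int (*build_ext_cfg)(struct cmd *out, int arg);
};

static int send_cmd(const struct cmd *c)
{
        printf("sending cmd id=%d arg=%d\n", c->id, c->arg);
        return 0;
}

static int ext_resource_config(const struct ops *ops, int arg)
{
        struct cmd c;
        int ret;

        if (!ops->build_ext_cfg)
                return -EOPNOTSUPP;     /* op not wired up for this firmware */

        ret = ops->build_ext_cfg(&c, arg);
        if (ret)
                return ret;

        return send_cmd(&c);
}

static int build_10_4(struct cmd *out, int arg)
{
        out->id = 42;           /* stands in for the mapped command id */
        out->arg = arg;
        return 0;
}

int main(void)
{
        struct ops with = { .build_ext_cfg = build_10_4 };
        struct ops without = { .build_ext_cfg = NULL };

        printf("10.4 fw: %d\n", ext_resource_config(&with, 7));
        printf("other fw: %d\n", ext_resource_config(&without, 7));
        return 0;
}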
1355static inline int
1333ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype) 1356ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
1334{ 1357{
1335 if (!ar->wmi.ops->get_vdev_subtype) 1358 if (!ar->wmi.ops->get_vdev_subtype)
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 70261387d1a5..4c75c74be5e7 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -705,6 +705,7 @@ static struct wmi_cmd_map wmi_10_4_cmd_map = {
705 .set_cca_params_cmdid = WMI_10_4_SET_CCA_PARAMS_CMDID, 705 .set_cca_params_cmdid = WMI_10_4_SET_CCA_PARAMS_CMDID,
706 .pdev_bss_chan_info_request_cmdid = 706 .pdev_bss_chan_info_request_cmdid =
707 WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID, 707 WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
708 .ext_resource_cfg_cmdid = WMI_10_4_EXT_RESOURCE_CFG_CMDID,
708}; 709};
709 710
710/* MAIN WMI VDEV param map */ 711/* MAIN WMI VDEV param map */
@@ -2099,34 +2100,6 @@ int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
2099 return 0; 2100 return 0;
2100} 2101}
2101 2102
2102static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode)
2103{
2104 enum ieee80211_band band;
2105
2106 switch (phy_mode) {
2107 case MODE_11A:
2108 case MODE_11NA_HT20:
2109 case MODE_11NA_HT40:
2110 case MODE_11AC_VHT20:
2111 case MODE_11AC_VHT40:
2112 case MODE_11AC_VHT80:
2113 band = IEEE80211_BAND_5GHZ;
2114 break;
2115 case MODE_11G:
2116 case MODE_11B:
2117 case MODE_11GONLY:
2118 case MODE_11NG_HT20:
2119 case MODE_11NG_HT40:
2120 case MODE_11AC_VHT20_2G:
2121 case MODE_11AC_VHT40_2G:
2122 case MODE_11AC_VHT80_2G:
2123 default:
2124 band = IEEE80211_BAND_2GHZ;
2125 }
2126
2127 return band;
2128}
2129
2130/* If keys are configured, HW decrypts all frames 2103/* If keys are configured, HW decrypts all frames
2131 * with protected bit set. Mark such frames as decrypted. 2104 * with protected bit set. Mark such frames as decrypted.
2132 */ 2105 */
@@ -2167,8 +2140,10 @@ static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb,
2167 struct wmi_mgmt_rx_event_v1 *ev_v1; 2140 struct wmi_mgmt_rx_event_v1 *ev_v1;
2168 struct wmi_mgmt_rx_event_v2 *ev_v2; 2141 struct wmi_mgmt_rx_event_v2 *ev_v2;
2169 struct wmi_mgmt_rx_hdr_v1 *ev_hdr; 2142 struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
2143 struct wmi_mgmt_rx_ext_info *ext_info;
2170 size_t pull_len; 2144 size_t pull_len;
2171 u32 msdu_len; 2145 u32 msdu_len;
2146 u32 len;
2172 2147
2173 if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) { 2148 if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) {
2174 ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data; 2149 ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
@@ -2195,6 +2170,12 @@ static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb,
2195 if (skb->len < msdu_len) 2170 if (skb->len < msdu_len)
2196 return -EPROTO; 2171 return -EPROTO;
2197 2172
2173 if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) {
2174 len = ALIGN(le32_to_cpu(arg->buf_len), 4);
2175 ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len);
2176 memcpy(&arg->ext_info, ext_info,
2177 sizeof(struct wmi_mgmt_rx_ext_info));
2178 }
2198 /* the WMI buffer might've ended up being padded to 4 bytes due to HTC 2179 /* the WMI buffer might've ended up being padded to 4 bytes due to HTC
2199 * trailer with credit update. Trim the excess garbage. 2180 * trailer with credit update. Trim the excess garbage.
2200 */ 2181 */
@@ -2211,6 +2192,8 @@ static int ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k *ar,
2211 struct wmi_10_4_mgmt_rx_hdr *ev_hdr; 2192 struct wmi_10_4_mgmt_rx_hdr *ev_hdr;
2212 size_t pull_len; 2193 size_t pull_len;
2213 u32 msdu_len; 2194 u32 msdu_len;
2195 struct wmi_mgmt_rx_ext_info *ext_info;
2196 u32 len;
2214 2197
2215 ev = (struct wmi_10_4_mgmt_rx_event *)skb->data; 2198 ev = (struct wmi_10_4_mgmt_rx_event *)skb->data;
2216 ev_hdr = &ev->hdr; 2199 ev_hdr = &ev->hdr;
@@ -2231,6 +2214,13 @@ static int ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k *ar,
2231 if (skb->len < msdu_len) 2214 if (skb->len < msdu_len)
2232 return -EPROTO; 2215 return -EPROTO;
2233 2216
2217 if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) {
2218 len = ALIGN(le32_to_cpu(arg->buf_len), 4);
2219 ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len);
2220 memcpy(&arg->ext_info, ext_info,
2221 sizeof(struct wmi_mgmt_rx_ext_info));
2222 }
2223
2234 /* Make sure bytes added for padding are removed. */ 2224 /* Make sure bytes added for padding are removed. */
2235 skb_trim(skb, msdu_len); 2225 skb_trim(skb, msdu_len);
2236 2226
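
Both pull paths above gain the same handling: when the status word has WMI_RX_STATUS_EXT_INFO set, the firmware appends a wmi_mgmt_rx_ext_info trailer after the frame buffer, aligned to 4 bytes, and the driver copies it into arg->ext_info. A minimal standalone sketch of that layout and copy, assuming the trailer is just the little-endian 64-bit MAC timestamp declared later in wmi.h; the buffer contents below are invented for illustration:

/* Standalone model of the ext_info trailer parsing added above. */
#include <stdint.h>
#include <stdio.h>

#define WMI_RX_STATUS_EXT_INFO 0x40
#define ALIGN4(x) (((x) + 3) & ~3u)

struct mgmt_rx_ext_info {
	uint64_t rx_mac_timestamp;	/* little-endian on the wire */
};

static uint64_t get_le64(const uint8_t *p)
{
	uint64_t v = 0;
	for (int i = 7; i >= 0; i--)
		v = (v << 8) | p[i];
	return v;
}

int main(void)
{
	/* 6-byte "frame", padded to 8 bytes, followed by the 8-byte trailer */
	uint8_t payload[16] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x11 };
	uint32_t buf_len = 6, status = WMI_RX_STATUS_EXT_INFO;
	uint64_t ts = 0x1122334455667788ULL;

	for (int i = 0; i < 8; i++)	/* firmware writes the trailer LE */
		payload[ALIGN4(buf_len) + i] = (ts >> (8 * i)) & 0xff;

	if (status & WMI_RX_STATUS_EXT_INFO) {
		struct mgmt_rx_ext_info ext;
		ext.rx_mac_timestamp = get_le64(payload + ALIGN4(buf_len));
		printf("rx_mac_timestamp = 0x%llx\n",
		       (unsigned long long)ext.rx_mac_timestamp);
	}
	return 0;
}

The trailer is read at ALIGN(buf_len, 4) rather than buf_len itself, matching how the firmware pads the frame before appending the extension data.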
@@ -2281,14 +2271,19 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
2281 if (rx_status & WMI_RX_STATUS_ERR_MIC) 2271 if (rx_status & WMI_RX_STATUS_ERR_MIC)
2282 status->flag |= RX_FLAG_MMIC_ERROR; 2272 status->flag |= RX_FLAG_MMIC_ERROR;
2283 2273
2274 if (rx_status & WMI_RX_STATUS_EXT_INFO) {
2275 status->mactime =
2276 __le64_to_cpu(arg.ext_info.rx_mac_timestamp);
2277 status->flag |= RX_FLAG_MACTIME_END;
2278 }
2284 /* Hardware can Rx CCK rates on 5GHz. In that case phy_mode is set to 2279 /* Hardware can Rx CCK rates on 5GHz. In that case phy_mode is set to
2285 * MODE_11B. This means phy_mode is not a reliable source for the band 2280 * MODE_11B. This means phy_mode is not a reliable source for the band
2286 * of mgmt rx. 2281 * of mgmt rx.
2287 */ 2282 */
2288 if (channel >= 1 && channel <= 14) { 2283 if (channel >= 1 && channel <= 14) {
2289 status->band = IEEE80211_BAND_2GHZ; 2284 status->band = NL80211_BAND_2GHZ;
2290 } else if (channel >= 36 && channel <= 165) { 2285 } else if (channel >= 36 && channel <= 165) {
2291 status->band = IEEE80211_BAND_5GHZ; 2286 status->band = NL80211_BAND_5GHZ;
2292 } else { 2287 } else {
2293 /* Shouldn't happen unless list of advertised channels to 2288 /* Shouldn't happen unless list of advertised channels to
2294 * mac80211 has been changed. 2289 * mac80211 has been changed.
@@ -2298,7 +2293,7 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
2298 return 0; 2293 return 0;
2299 } 2294 }
2300 2295
2301 if (phy_mode == MODE_11B && status->band == IEEE80211_BAND_5GHZ) 2296 if (phy_mode == MODE_11B && status->band == NL80211_BAND_5GHZ)
2302 ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n"); 2297 ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
2303 2298
2304 sband = &ar->mac.sbands[status->band]; 2299 sband = &ar->mac.sbands[status->band];
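
With phy_mode_to_band() gone, the band for a received management frame is taken from the channel number and stored as an nl80211_band value; phy_mode only feeds the CCK-on-5GHz debug message. A standalone sketch of the channel-to-band mapping used above (the enum names here are local stand-ins for the nl80211 constants):

#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ, BAND_INVALID };

/* Same channel ranges as the hunk above: 1..14 -> 2 GHz, 36..165 -> 5 GHz. */
static enum band channel_to_band(int channel)
{
	if (channel >= 1 && channel <= 14)
		return BAND_2GHZ;
	if (channel >= 36 && channel <= 165)
		return BAND_5GHZ;
	return BAND_INVALID;	/* the driver drops such frames */
}

int main(void)
{
	printf("chan 6  -> %d\n", channel_to_band(6));	/* BAND_2GHZ */
	printf("chan 36 -> %d\n", channel_to_band(36));	/* BAND_5GHZ */
	return 0;
}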
@@ -2310,6 +2305,12 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
2310 hdr = (struct ieee80211_hdr *)skb->data; 2305 hdr = (struct ieee80211_hdr *)skb->data;
2311 fc = le16_to_cpu(hdr->frame_control); 2306 fc = le16_to_cpu(hdr->frame_control);
2312 2307
2308 /* Firmware is guaranteed to report all essential management frames via
2309 * WMI while it can deliver some extra via HTT. Since there can be
2310 * duplicates split the reporting wrt monitor/sniffing.
2311 */
2312 status->flag |= RX_FLAG_SKIP_MONITOR;
2313
2313 ath10k_wmi_handle_wep_reauth(ar, skb, status); 2314 ath10k_wmi_handle_wep_reauth(ar, skb, status);
2314 2315
2315 /* FW delivers WEP Shared Auth frame with Protected Bit set and 2316 /* FW delivers WEP Shared Auth frame with Protected Bit set and
@@ -2351,7 +2352,7 @@ static int freq_to_idx(struct ath10k *ar, int freq)
2351 struct ieee80211_supported_band *sband; 2352 struct ieee80211_supported_band *sband;
2352 int band, ch, idx = 0; 2353 int band, ch, idx = 0;
2353 2354
2354 for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) { 2355 for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
2355 sband = ar->hw->wiphy->bands[band]; 2356 sband = ar->hw->wiphy->bands[band];
2356 if (!sband) 2357 if (!sband)
2357 continue; 2358 continue;
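
freq_to_idx(), whose loop is touched above only for the band-enum rename, appears to walk every registered band and count channels until the requested center frequency is found; the resulting index is then used by the channel-info/survey handling. A standalone model of that search, with a made-up channel table standing in for ar->hw->wiphy->bands[]:

#include <stdio.h>

struct channel { int center_freq; };
struct band_tbl { const struct channel *channels; int n_channels; };

/* Count channels across all bands until the frequency matches. */
static int freq_to_idx(const struct band_tbl *bands, int n_bands, int freq)
{
	int idx = 0;

	for (int b = 0; b < n_bands; b++) {
		if (!bands[b].channels)
			continue;
		for (int ch = 0; ch < bands[b].n_channels; ch++, idx++)
			if (bands[b].channels[ch].center_freq == freq)
				return idx;
	}
	return -1;	/* not found */
}

int main(void)
{
	const struct channel g[] = { {2412}, {2437}, {2462} };
	const struct channel a[] = { {5180}, {5200} };
	const struct band_tbl bands[] = { { g, 3 }, { a, 2 } };

	printf("5180 MHz -> idx %d\n", freq_to_idx(bands, 2, 5180)); /* 3 */
	return 0;
}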
@@ -2612,6 +2613,16 @@ void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src,
2612 dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate); 2613 dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
2613} 2614}
2614 2615
2616static void
2617ath10k_wmi_10_4_pull_peer_stats(const struct wmi_10_4_peer_stats *src,
2618 struct ath10k_fw_stats_peer *dst)
2619{
2620 ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
2621 dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
2622 dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
2623 dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
2624}
2625
2615static int ath10k_wmi_main_op_pull_fw_stats(struct ath10k *ar, 2626static int ath10k_wmi_main_op_pull_fw_stats(struct ath10k *ar,
2616 struct sk_buff *skb, 2627 struct sk_buff *skb,
2617 struct ath10k_fw_stats *stats) 2628 struct ath10k_fw_stats *stats)
@@ -2865,11 +2876,8 @@ static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar,
2865 const struct wmi_10_2_4_ext_peer_stats *src; 2876 const struct wmi_10_2_4_ext_peer_stats *src;
2866 struct ath10k_fw_stats_peer *dst; 2877 struct ath10k_fw_stats_peer *dst;
2867 int stats_len; 2878 int stats_len;
2868 bool ext_peer_stats_support;
2869 2879
2870 ext_peer_stats_support = test_bit(WMI_SERVICE_PEER_STATS, 2880 if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
2871 ar->wmi.svc_map);
2872 if (ext_peer_stats_support)
2873 stats_len = sizeof(struct wmi_10_2_4_ext_peer_stats); 2881 stats_len = sizeof(struct wmi_10_2_4_ext_peer_stats);
2874 else 2882 else
2875 stats_len = sizeof(struct wmi_10_2_4_peer_stats); 2883 stats_len = sizeof(struct wmi_10_2_4_peer_stats);
@@ -2886,7 +2894,7 @@ static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar,
2886 2894
2887 dst->peer_rx_rate = __le32_to_cpu(src->common.peer_rx_rate); 2895 dst->peer_rx_rate = __le32_to_cpu(src->common.peer_rx_rate);
2888 2896
2889 if (ext_peer_stats_support) 2897 if (ath10k_peer_stats_enabled(ar))
2890 dst->rx_duration = __le32_to_cpu(src->rx_duration); 2898 dst->rx_duration = __le32_to_cpu(src->rx_duration);
2891 /* FIXME: expose 10.2 specific values */ 2899 /* FIXME: expose 10.2 specific values */
2892 2900
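
The local ext_peer_stats_support flag is dropped: the record length still keys off the WMI service bit, but whether rx_duration is copied now goes through ath10k_peer_stats_enabled(), defined elsewhere in the driver. A plausible standalone sketch of such a gate, assuming it requires both a host-side opt-in flag and the firmware service bit (both names below are stand-ins, not the driver's):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver flag and WMI service bit. */
#define FLAG_PEER_STATS	(1u << 0)	/* host opted in to peer stats */
#define SVC_PEER_STATS	(1u << 1)	/* firmware advertises support */

/* Sketch: peer stats are used only when both sides agree. */
static bool peer_stats_enabled(unsigned dev_flags, unsigned svc_map)
{
	return (dev_flags & FLAG_PEER_STATS) && (svc_map & SVC_PEER_STATS);
}

int main(void)
{
	printf("%d\n", peer_stats_enabled(FLAG_PEER_STATS, SVC_PEER_STATS)); /* 1 */
	printf("%d\n", peer_stats_enabled(0, SVC_PEER_STATS));               /* 0 */
	return 0;
}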
@@ -2905,6 +2913,7 @@ static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar,
2905 u32 num_pdev_ext_stats; 2913 u32 num_pdev_ext_stats;
2906 u32 num_vdev_stats; 2914 u32 num_vdev_stats;
2907 u32 num_peer_stats; 2915 u32 num_peer_stats;
2916 u32 stats_id;
2908 int i; 2917 int i;
2909 2918
2910 if (!skb_pull(skb, sizeof(*ev))) 2919 if (!skb_pull(skb, sizeof(*ev)))
@@ -2914,6 +2923,7 @@ static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar,
2914 num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats); 2923 num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
2915 num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats); 2924 num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
2916 num_peer_stats = __le32_to_cpu(ev->num_peer_stats); 2925 num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
2926 stats_id = __le32_to_cpu(ev->stats_id);
2917 2927
2918 for (i = 0; i < num_pdev_stats; i++) { 2928 for (i = 0; i < num_pdev_stats; i++) {
2919 const struct wmi_10_4_pdev_stats *src; 2929 const struct wmi_10_4_pdev_stats *src;
@@ -2953,22 +2963,28 @@ static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar,
2953 /* fw doesn't implement vdev stats */ 2963 /* fw doesn't implement vdev stats */
2954 2964
2955 for (i = 0; i < num_peer_stats; i++) { 2965 for (i = 0; i < num_peer_stats; i++) {
2956 const struct wmi_10_4_peer_stats *src; 2966 const struct wmi_10_4_peer_extd_stats *src;
2957 struct ath10k_fw_stats_peer *dst; 2967 struct ath10k_fw_stats_peer *dst;
2968 int stats_len;
2969 bool extd_peer_stats = !!(stats_id & WMI_10_4_STAT_PEER_EXTD);
2970
2971 if (extd_peer_stats)
2972 stats_len = sizeof(struct wmi_10_4_peer_extd_stats);
2973 else
2974 stats_len = sizeof(struct wmi_10_4_peer_stats);
2958 2975
2959 src = (void *)skb->data; 2976 src = (void *)skb->data;
2960 if (!skb_pull(skb, sizeof(*src))) 2977 if (!skb_pull(skb, stats_len))
2961 return -EPROTO; 2978 return -EPROTO;
2962 2979
2963 dst = kzalloc(sizeof(*dst), GFP_ATOMIC); 2980 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
2964 if (!dst) 2981 if (!dst)
2965 continue; 2982 continue;
2966 2983
2967 ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr); 2984 ath10k_wmi_10_4_pull_peer_stats(&src->common, dst);
2968 dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
2969 dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
2970 dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
2971 /* FIXME: expose 10.4 specific values */ 2985 /* FIXME: expose 10.4 specific values */
2986 if (extd_peer_stats)
2987 dst->rx_duration = __le32_to_cpu(src->rx_duration);
2972 2988
2973 list_add_tail(&dst->list, &stats->peers); 2989 list_add_tail(&dst->list, &stats->peers);
2974 } 2990 }
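
For 10.4 firmware a peer record in the stats event can now be either the base wmi_10_4_peer_stats or the longer wmi_10_4_peer_extd_stats, and the event's stats_id bitmap (WMI_10_4_STAT_PEER_EXTD) tells the host which stride to use while walking the buffer. A standalone model of that walk; the structures below are simplified stand-ins, not the real WMI layouts:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define STAT_PEER_EXTD (1u << 3)	/* mirrors WMI_10_4_STAT_PEER_EXTD */

struct peer_stats      { uint32_t rssi, tx_rate, rx_rate; };
struct peer_extd_stats { struct peer_stats common; uint32_t rx_duration; };

/* Walk 'count' records of whichever size stats_id announces. */
static void pull_peers(const uint8_t *buf, int count, uint32_t stats_id)
{
	int extd = !!(stats_id & STAT_PEER_EXTD);
	size_t rec = extd ? sizeof(struct peer_extd_stats)
			  : sizeof(struct peer_stats);

	for (int i = 0; i < count; i++, buf += rec) {
		struct peer_extd_stats s = {0};

		memcpy(&s, buf, rec);	/* common fields always come first */
		printf("peer %d: rssi %u%s\n", i, (unsigned)s.common.rssi,
		       extd ? " (+rx_duration)" : "");
	}
}

int main(void)
{
	struct peer_extd_stats two[2] = {
		{ { 40, 1, 2 }, 100 }, { { 55, 3, 4 }, 200 } };

	pull_peers((const uint8_t *)two, 2, STAT_PEER_EXTD);
	return 0;
}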
@@ -4617,10 +4633,16 @@ static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
4617 } 4633 }
4618 4634
4619 if (test_bit(WMI_SERVICE_PEER_CACHING, ar->wmi.svc_map)) { 4635 if (test_bit(WMI_SERVICE_PEER_CACHING, ar->wmi.svc_map)) {
4636 if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
4637 ar->fw_features))
4638 ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS_PFC +
4639 ar->max_num_vdevs;
4640 else
4641 ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS +
4642 ar->max_num_vdevs;
4643
4620 ar->max_num_peers = TARGET_10_4_NUM_QCACHE_PEERS_MAX + 4644 ar->max_num_peers = TARGET_10_4_NUM_QCACHE_PEERS_MAX +
4621 ar->max_num_vdevs; 4645 ar->max_num_vdevs;
4622 ar->num_active_peers = ar->hw_params.qcache_active_peers +
4623 ar->max_num_vdevs;
4624 ar->num_tids = ar->num_active_peers * 2; 4646 ar->num_tids = ar->num_active_peers * 2;
4625 ar->max_num_stations = TARGET_10_4_NUM_QCACHE_PEERS_MAX; 4647 ar->max_num_stations = TARGET_10_4_NUM_QCACHE_PEERS_MAX;
4626 } 4648 }
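
For peer-caching (QCache) firmware the active-peer budget now depends on whether peer flow control is present in the firmware, instead of coming from hw_params.qcache_active_peers. A small standalone sketch of the resulting arithmetic; the numeric values are placeholders for the TARGET_10_4_* constants and max_num_vdevs, only the relationships mirror the hunk above:

#include <stdio.h>

/* Placeholder values -- the real TARGET_10_4_* constants live in hw.h. */
#define QCACHE_PEERS_MAX	512
#define QCACHE_ACTIVE_PEERS	50
#define QCACHE_ACTIVE_PEERS_PFC	35

int main(void)
{
	int max_num_vdevs = 16;
	int peer_flow_control = 1;	/* ATH10K_FW_FEATURE_PEER_FLOW_CONTROL set */

	int num_active_peers = (peer_flow_control ? QCACHE_ACTIVE_PEERS_PFC
						  : QCACHE_ACTIVE_PEERS)
			       + max_num_vdevs;
	int max_num_peers = QCACHE_PEERS_MAX + max_num_vdevs;
	int num_tids = num_active_peers * 2;

	printf("active peers %d, max peers %d, tids %d\n",
	       num_active_peers, max_num_peers, num_tids);
	return 0;
}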
@@ -5517,7 +5539,8 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
5517 5539
5518 config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS); 5540 config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
5519 config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS); 5541 config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
5520 if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) { 5542
5543 if (ath10k_peer_stats_enabled(ar)) {
5521 config.num_peers = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_PEERS); 5544 config.num_peers = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_PEERS);
5522 config.num_tids = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_TIDS); 5545 config.num_tids = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_TIDS);
5523 } else { 5546 } else {
@@ -5579,7 +5602,7 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
5579 test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map)) 5602 test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map))
5580 features |= WMI_10_2_COEX_GPIO; 5603 features |= WMI_10_2_COEX_GPIO;
5581 5604
5582 if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) 5605 if (ath10k_peer_stats_enabled(ar))
5583 features |= WMI_10_2_PEER_STATS; 5606 features |= WMI_10_2_PEER_STATS;
5584 5607
5585 cmd->resource_config.feature_mask = __cpu_to_le32(features); 5608 cmd->resource_config.feature_mask = __cpu_to_le32(features);
@@ -7484,6 +7507,28 @@ static int ath10k_wmi_10_4_op_get_vdev_subtype(struct ath10k *ar,
7484 return -ENOTSUPP; 7507 return -ENOTSUPP;
7485} 7508}
7486 7509
7510static struct sk_buff *
7511ath10k_wmi_10_4_ext_resource_config(struct ath10k *ar,
7512 enum wmi_host_platform_type type,
7513 u32 fw_feature_bitmap)
7514{
7515 struct wmi_ext_resource_config_10_4_cmd *cmd;
7516 struct sk_buff *skb;
7517
7518 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7519 if (!skb)
7520 return ERR_PTR(-ENOMEM);
7521
7522 cmd = (struct wmi_ext_resource_config_10_4_cmd *)skb->data;
7523 cmd->host_platform_config = __cpu_to_le32(type);
7524 cmd->fw_feature_bitmap = __cpu_to_le32(fw_feature_bitmap);
7525
7526 ath10k_dbg(ar, ATH10K_DBG_WMI,
7527 "wmi ext resource config host type %d firmware feature bitmap %08x\n",
7528 type, fw_feature_bitmap);
7529 return skb;
7530}
7531
7487static const struct wmi_ops wmi_ops = { 7532static const struct wmi_ops wmi_ops = {
7488 .rx = ath10k_wmi_op_rx, 7533 .rx = ath10k_wmi_op_rx,
7489 .map_svc = wmi_main_svc_map, 7534 .map_svc = wmi_main_svc_map,
@@ -7810,6 +7855,7 @@ static const struct wmi_ops wmi_10_4_ops = {
7810 .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp, 7855 .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
7811 .gen_delba_send = ath10k_wmi_op_gen_delba_send, 7856 .gen_delba_send = ath10k_wmi_op_gen_delba_send,
7812 .fw_stats_fill = ath10k_wmi_10_4_op_fw_stats_fill, 7857 .fw_stats_fill = ath10k_wmi_10_4_op_fw_stats_fill,
7858 .ext_resource_config = ath10k_wmi_10_4_ext_resource_config,
7813 7859
7814 /* shared with 10.2 */ 7860 /* shared with 10.2 */
7815 .gen_request_stats = ath10k_wmi_op_gen_request_stats, 7861 .gen_request_stats = ath10k_wmi_op_gen_request_stats,
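
The new generator is wired up only in the 10.4 ops table, so older firmware branches simply lack the op. A standalone model of that dispatch pattern: a per-firmware ops structure where .ext_resource_config may be NULL and the caller skips the command when it is (names are illustrative, not the driver's):

#include <stdio.h>

struct wmi_ops_model {
	int (*ext_resource_config)(int platform, unsigned feature_bitmap);
};

static int wmi_10_4_ext_resource_config(int platform, unsigned bitmap)
{
	printf("ext resource config: platform %d bitmap 0x%x\n",
	       platform, bitmap);
	return 0;
}

static const struct wmi_ops_model wmi_10_4 = {
	.ext_resource_config = wmi_10_4_ext_resource_config,
};

static const struct wmi_ops_model wmi_10_2 = { 0 };	/* op not provided */

static int send_ext_resource_config(const struct wmi_ops_model *ops)
{
	if (!ops->ext_resource_config)
		return -1;	/* firmware without the command: skip */
	return ops->ext_resource_config(0 /* high perf */, 0x20 /* peer stats */);
}

int main(void)
{
	send_ext_resource_config(&wmi_10_4);	/* dispatches */
	send_ext_resource_config(&wmi_10_2);	/* silently skipped */
	return 0;
}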
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 4d3cbc44fcd2..feebd19ff08c 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -816,6 +816,7 @@ struct wmi_cmd_map {
816 u32 set_cca_params_cmdid; 816 u32 set_cca_params_cmdid;
817 u32 pdev_bss_chan_info_request_cmdid; 817 u32 pdev_bss_chan_info_request_cmdid;
818 u32 pdev_enable_adaptive_cca_cmdid; 818 u32 pdev_enable_adaptive_cca_cmdid;
819 u32 ext_resource_cfg_cmdid;
819}; 820};
820 821
821/* 822/*
@@ -2667,6 +2668,31 @@ struct wmi_resource_config_10_4 {
2667 __le32 qwrap_config; 2668 __le32 qwrap_config;
2668} __packed; 2669} __packed;
2669 2670
2671/**
2672 * enum wmi_10_4_feature_mask - WMI 10.4 feature enable/disable flags
2673 * @WMI_10_4_LTEU_SUPPORT: LTEU config
2674 * @WMI_10_4_COEX_GPIO_SUPPORT: COEX GPIO config
2675 * @WMI_10_4_AUX_RADIO_SPECTRAL_INTF: AUX Radio Enhancement for spectral scan
2676 * @WMI_10_4_AUX_RADIO_CHAN_LOAD_INTF: AUX Radio Enhancement for chan load scan
2677 * @WMI_10_4_BSS_CHANNEL_INFO_64: BSS channel info stats
2678 * @WMI_10_4_PEER_STATS: Per station stats
2679 */
2680enum wmi_10_4_feature_mask {
2681 WMI_10_4_LTEU_SUPPORT = BIT(0),
2682 WMI_10_4_COEX_GPIO_SUPPORT = BIT(1),
2683 WMI_10_4_AUX_RADIO_SPECTRAL_INTF = BIT(2),
2684 WMI_10_4_AUX_RADIO_CHAN_LOAD_INTF = BIT(3),
2685 WMI_10_4_BSS_CHANNEL_INFO_64 = BIT(4),
2686 WMI_10_4_PEER_STATS = BIT(5),
2687};
2688
2689struct wmi_ext_resource_config_10_4_cmd {
2690 /* contains enum wmi_host_platform_type */
2691 __le32 host_platform_config;
2692 /* see enum wmi_10_4_feature_mask */
2693 __le32 fw_feature_bitmap;
2694};
2695
2670/* structure describing host memory chunk. */ 2696/* structure describing host memory chunk. */
2671struct host_memory_chunk { 2697struct host_memory_chunk {
2672 /* id of the request that is passed up in service ready */ 2698 /* id of the request that is passed up in service ready */
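
The command payload added above is just two little-endian words: the host platform type and a bitmap built from enum wmi_10_4_feature_mask. A standalone sketch that composes such a payload; the bit positions mirror the enum above, the rest is illustrative (the driver stores the fields with __cpu_to_le32(), endianness handling is omitted here):

#include <stdint.h>
#include <stdio.h>

struct ext_resource_config_cmd {
	uint32_t host_platform_config;	/* enum wmi_host_platform_type */
	uint32_t fw_feature_bitmap;	/* enum wmi_10_4_feature_mask bits */
};

enum host_platform_type { HOST_PLATFORM_HIGH_PERF, HOST_PLATFORM_LOW_PERF };

#define FEAT_BSS_CHANNEL_INFO_64	(1u << 4)	/* WMI_10_4_BSS_CHANNEL_INFO_64 */
#define FEAT_PEER_STATS			(1u << 5)	/* WMI_10_4_PEER_STATS */

int main(void)
{
	struct ext_resource_config_cmd cmd = {
		.host_platform_config = HOST_PLATFORM_HIGH_PERF,
		.fw_feature_bitmap = FEAT_BSS_CHANNEL_INFO_64 | FEAT_PEER_STATS,
	};

	printf("platform %u, feature bitmap 0x%08x\n",
	       (unsigned)cmd.host_platform_config,
	       (unsigned)cmd.fw_feature_bitmap);
	return 0;
}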
@@ -3037,11 +3063,17 @@ struct wmi_10_4_mgmt_rx_event {
3037 u8 buf[0]; 3063 u8 buf[0];
3038} __packed; 3064} __packed;
3039 3065
3066struct wmi_mgmt_rx_ext_info {
3067 __le64 rx_mac_timestamp;
3068} __packed __aligned(4);
3069
3040#define WMI_RX_STATUS_OK 0x00 3070#define WMI_RX_STATUS_OK 0x00
3041#define WMI_RX_STATUS_ERR_CRC 0x01 3071#define WMI_RX_STATUS_ERR_CRC 0x01
3042#define WMI_RX_STATUS_ERR_DECRYPT 0x08 3072#define WMI_RX_STATUS_ERR_DECRYPT 0x08
3043#define WMI_RX_STATUS_ERR_MIC 0x10 3073#define WMI_RX_STATUS_ERR_MIC 0x10
3044#define WMI_RX_STATUS_ERR_KEY_CACHE_MISS 0x20 3074#define WMI_RX_STATUS_ERR_KEY_CACHE_MISS 0x20
3075/* Extension data at the end of mgmt frame */
3076#define WMI_RX_STATUS_EXT_INFO 0x40
3045 3077
3046#define PHY_ERROR_GEN_SPECTRAL_SCAN 0x26 3078#define PHY_ERROR_GEN_SPECTRAL_SCAN 0x26
3047#define PHY_ERROR_GEN_FALSE_RADAR_EXT 0x24 3079#define PHY_ERROR_GEN_FALSE_RADAR_EXT 0x24
@@ -4072,6 +4104,13 @@ enum wmi_stats_id {
4072 WMI_STAT_VDEV_RATE = BIT(5), 4104 WMI_STAT_VDEV_RATE = BIT(5),
4073}; 4105};
4074 4106
4107enum wmi_10_4_stats_id {
4108 WMI_10_4_STAT_PEER = BIT(0),
4109 WMI_10_4_STAT_AP = BIT(1),
4110 WMI_10_4_STAT_INST = BIT(2),
4111 WMI_10_4_STAT_PEER_EXTD = BIT(3),
4112};
4113
4075struct wlan_inst_rssi_args { 4114struct wlan_inst_rssi_args {
4076 __le16 cfg_retry_count; 4115 __le16 cfg_retry_count;
4077 __le16 retry_count; 4116 __le16 retry_count;
@@ -4271,6 +4310,15 @@ struct wmi_10_4_peer_stats {
4271 __le32 peer_rssi_changed; 4310 __le32 peer_rssi_changed;
4272} __packed; 4311} __packed;
4273 4312
4313struct wmi_10_4_peer_extd_stats {
4314 struct wmi_10_4_peer_stats common;
4315 struct wmi_mac_addr peer_macaddr;
4316 __le32 inactive_time;
4317 __le32 peer_chain_rssi;
4318 __le32 rx_duration;
4319 __le32 reserved[10];
4320} __packed;
4321
4274struct wmi_10_2_pdev_ext_stats { 4322struct wmi_10_2_pdev_ext_stats {
4275 __le32 rx_rssi_comb; 4323 __le32 rx_rssi_comb;
4276 __le32 rx_rssi[4]; 4324 __le32 rx_rssi[4];
@@ -6116,6 +6164,7 @@ struct wmi_mgmt_rx_ev_arg {
6116 __le32 phy_mode; 6164 __le32 phy_mode;
6117 __le32 buf_len; 6165 __le32 buf_len;
6118 __le32 status; /* %WMI_RX_STATUS_ */ 6166 __le32 status; /* %WMI_RX_STATUS_ */
6167 struct wmi_mgmt_rx_ext_info ext_info;
6119}; 6168};
6120 6169
6121struct wmi_ch_info_ev_arg { 6170struct wmi_ch_info_ev_arg {
@@ -6401,6 +6450,11 @@ struct wmi_pdev_set_adaptive_cca_params {
6401 __le32 cca_detect_margin; 6450 __le32 cca_detect_margin;
6402} __packed; 6451} __packed;
6403 6452
6453enum wmi_host_platform_type {
6454 WMI_HOST_PLATFORM_HIGH_PERF,
6455 WMI_HOST_PLATFORM_LOW_PERF,
6456};
6457
6404struct ath10k; 6458struct ath10k;
6405struct ath10k_vif; 6459struct ath10k_vif;
6406struct ath10k_fw_stats_pdev; 6460struct ath10k_fw_stats_pdev;
diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c
index 38be2702c0e2..0624333f5430 100644
--- a/drivers/net/wireless/ath/ath5k/ani.c
+++ b/drivers/net/wireless/ath/ath5k/ani.c
@@ -279,7 +279,7 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
279 if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL) 279 if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL)
280 ath5k_ani_set_firstep_level(ah, as->firstep_level + 1); 280 ath5k_ani_set_firstep_level(ah, as->firstep_level + 1);
281 return; 281 return;
282 } else if (ah->ah_current_channel->band == IEEE80211_BAND_2GHZ) { 282 } else if (ah->ah_current_channel->band == NL80211_BAND_2GHZ) {
283 /* beacon RSSI is low. in B/G mode turn off OFDM weak signal 283 /* beacon RSSI is low. in B/G mode turn off OFDM weak signal
284 * detect and zero firstep level to maximize CCK sensitivity */ 284 * detect and zero firstep level to maximize CCK sensitivity */
285 ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, 285 ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index ba12f7f4061d..67fedb61fcc0 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -1265,10 +1265,10 @@ struct ath5k_hw {
1265 void __iomem *iobase; /* address of the device */ 1265 void __iomem *iobase; /* address of the device */
1266 struct mutex lock; /* dev-level lock */ 1266 struct mutex lock; /* dev-level lock */
1267 struct ieee80211_hw *hw; /* IEEE 802.11 common */ 1267 struct ieee80211_hw *hw; /* IEEE 802.11 common */
1268 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS]; 1268 struct ieee80211_supported_band sbands[NUM_NL80211_BANDS];
1269 struct ieee80211_channel channels[ATH_CHAN_MAX]; 1269 struct ieee80211_channel channels[ATH_CHAN_MAX];
1270 struct ieee80211_rate rates[IEEE80211_NUM_BANDS][AR5K_MAX_RATES]; 1270 struct ieee80211_rate rates[NUM_NL80211_BANDS][AR5K_MAX_RATES];
1271 s8 rate_idx[IEEE80211_NUM_BANDS][AR5K_MAX_RATES]; 1271 s8 rate_idx[NUM_NL80211_BANDS][AR5K_MAX_RATES];
1272 enum nl80211_iftype opmode; 1272 enum nl80211_iftype opmode;
1273 1273
1274#ifdef CONFIG_ATH5K_DEBUG 1274#ifdef CONFIG_ATH5K_DEBUG
@@ -1532,7 +1532,7 @@ int ath5k_eeprom_mode_from_channel(struct ath5k_hw *ah,
1532 1532
1533/* Protocol Control Unit Functions */ 1533/* Protocol Control Unit Functions */
1534/* Helpers */ 1534/* Helpers */
1535int ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum ieee80211_band band, 1535int ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum nl80211_band band,
1536 int len, struct ieee80211_rate *rate, bool shortpre); 1536 int len, struct ieee80211_rate *rate, bool shortpre);
1537unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah); 1537unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah);
1538unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah); 1538unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah);
@@ -1611,7 +1611,7 @@ int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel);
1611 1611
1612/* PHY functions */ 1612/* PHY functions */
1613/* Misc PHY functions */ 1613/* Misc PHY functions */
1614u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band); 1614u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum nl80211_band band);
1615int ath5k_hw_phy_disable(struct ath5k_hw *ah); 1615int ath5k_hw_phy_disable(struct ath5k_hw *ah);
1616/* Gain_F optimization */ 1616/* Gain_F optimization */
1617enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah); 1617enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah);
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index 66b6366158b9..233054bd6b52 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -152,7 +152,7 @@ int ath5k_hw_init(struct ath5k_hw *ah)
152 ah->ah_phy_revision = ath5k_hw_reg_read(ah, AR5K_PHY_CHIP_ID) & 152 ah->ah_phy_revision = ath5k_hw_reg_read(ah, AR5K_PHY_CHIP_ID) &
153 0xffffffff; 153 0xffffffff;
154 ah->ah_radio_5ghz_revision = ath5k_hw_radio_revision(ah, 154 ah->ah_radio_5ghz_revision = ath5k_hw_radio_revision(ah,
155 IEEE80211_BAND_5GHZ); 155 NL80211_BAND_5GHZ);
156 156
157 /* Try to identify radio chip based on its srev */ 157 /* Try to identify radio chip based on its srev */
158 switch (ah->ah_radio_5ghz_revision & 0xf0) { 158 switch (ah->ah_radio_5ghz_revision & 0xf0) {
@@ -160,14 +160,14 @@ int ath5k_hw_init(struct ath5k_hw *ah)
160 ah->ah_radio = AR5K_RF5111; 160 ah->ah_radio = AR5K_RF5111;
161 ah->ah_single_chip = false; 161 ah->ah_single_chip = false;
162 ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah, 162 ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah,
163 IEEE80211_BAND_2GHZ); 163 NL80211_BAND_2GHZ);
164 break; 164 break;
165 case AR5K_SREV_RAD_5112: 165 case AR5K_SREV_RAD_5112:
166 case AR5K_SREV_RAD_2112: 166 case AR5K_SREV_RAD_2112:
167 ah->ah_radio = AR5K_RF5112; 167 ah->ah_radio = AR5K_RF5112;
168 ah->ah_single_chip = false; 168 ah->ah_single_chip = false;
169 ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah, 169 ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah,
170 IEEE80211_BAND_2GHZ); 170 NL80211_BAND_2GHZ);
171 break; 171 break;
172 case AR5K_SREV_RAD_2413: 172 case AR5K_SREV_RAD_2413:
173 ah->ah_radio = AR5K_RF2413; 173 ah->ah_radio = AR5K_RF2413;
@@ -204,7 +204,7 @@ int ath5k_hw_init(struct ath5k_hw *ah)
204 ah->ah_radio = AR5K_RF5111; 204 ah->ah_radio = AR5K_RF5111;
205 ah->ah_single_chip = false; 205 ah->ah_single_chip = false;
206 ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah, 206 ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah,
207 IEEE80211_BAND_2GHZ); 207 NL80211_BAND_2GHZ);
208 } else if (ah->ah_mac_version == (AR5K_SREV_AR2425 >> 4) || 208 } else if (ah->ah_mac_version == (AR5K_SREV_AR2425 >> 4) ||
209 ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4) || 209 ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4) ||
210 ah->ah_phy_revision == AR5K_SREV_PHY_2425) { 210 ah->ah_phy_revision == AR5K_SREV_PHY_2425) {
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 3d946d8b2db2..d98fd421c7ec 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -268,15 +268,15 @@ static void ath5k_reg_notifier(struct wiphy *wiphy,
268 * Returns true for the channel numbers used. 268 * Returns true for the channel numbers used.
269 */ 269 */
270#ifdef CONFIG_ATH5K_TEST_CHANNELS 270#ifdef CONFIG_ATH5K_TEST_CHANNELS
271static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band) 271static bool ath5k_is_standard_channel(short chan, enum nl80211_band band)
272{ 272{
273 return true; 273 return true;
274} 274}
275 275
276#else 276#else
277static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band) 277static bool ath5k_is_standard_channel(short chan, enum nl80211_band band)
278{ 278{
279 if (band == IEEE80211_BAND_2GHZ && chan <= 14) 279 if (band == NL80211_BAND_2GHZ && chan <= 14)
280 return true; 280 return true;
281 281
282 return /* UNII 1,2 */ 282 return /* UNII 1,2 */
@@ -297,18 +297,18 @@ ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
297 unsigned int mode, unsigned int max) 297 unsigned int mode, unsigned int max)
298{ 298{
299 unsigned int count, size, freq, ch; 299 unsigned int count, size, freq, ch;
300 enum ieee80211_band band; 300 enum nl80211_band band;
301 301
302 switch (mode) { 302 switch (mode) {
303 case AR5K_MODE_11A: 303 case AR5K_MODE_11A:
304 /* 1..220, but 2GHz frequencies are filtered by check_channel */ 304 /* 1..220, but 2GHz frequencies are filtered by check_channel */
305 size = 220; 305 size = 220;
306 band = IEEE80211_BAND_5GHZ; 306 band = NL80211_BAND_5GHZ;
307 break; 307 break;
308 case AR5K_MODE_11B: 308 case AR5K_MODE_11B:
309 case AR5K_MODE_11G: 309 case AR5K_MODE_11G:
310 size = 26; 310 size = 26;
311 band = IEEE80211_BAND_2GHZ; 311 band = NL80211_BAND_2GHZ;
312 break; 312 break;
313 default: 313 default:
314 ATH5K_WARN(ah, "bad mode, not copying channels\n"); 314 ATH5K_WARN(ah, "bad mode, not copying channels\n");
@@ -363,13 +363,13 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
363 int max_c, count_c = 0; 363 int max_c, count_c = 0;
364 int i; 364 int i;
365 365
366 BUILD_BUG_ON(ARRAY_SIZE(ah->sbands) < IEEE80211_NUM_BANDS); 366 BUILD_BUG_ON(ARRAY_SIZE(ah->sbands) < NUM_NL80211_BANDS);
367 max_c = ARRAY_SIZE(ah->channels); 367 max_c = ARRAY_SIZE(ah->channels);
368 368
369 /* 2GHz band */ 369 /* 2GHz band */
370 sband = &ah->sbands[IEEE80211_BAND_2GHZ]; 370 sband = &ah->sbands[NL80211_BAND_2GHZ];
371 sband->band = IEEE80211_BAND_2GHZ; 371 sband->band = NL80211_BAND_2GHZ;
372 sband->bitrates = &ah->rates[IEEE80211_BAND_2GHZ][0]; 372 sband->bitrates = &ah->rates[NL80211_BAND_2GHZ][0];
373 373
374 if (test_bit(AR5K_MODE_11G, ah->ah_capabilities.cap_mode)) { 374 if (test_bit(AR5K_MODE_11G, ah->ah_capabilities.cap_mode)) {
375 /* G mode */ 375 /* G mode */
@@ -381,7 +381,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
381 sband->n_channels = ath5k_setup_channels(ah, sband->channels, 381 sband->n_channels = ath5k_setup_channels(ah, sband->channels,
382 AR5K_MODE_11G, max_c); 382 AR5K_MODE_11G, max_c);
383 383
384 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband; 384 hw->wiphy->bands[NL80211_BAND_2GHZ] = sband;
385 count_c = sband->n_channels; 385 count_c = sband->n_channels;
386 max_c -= count_c; 386 max_c -= count_c;
387 } else if (test_bit(AR5K_MODE_11B, ah->ah_capabilities.cap_mode)) { 387 } else if (test_bit(AR5K_MODE_11B, ah->ah_capabilities.cap_mode)) {
@@ -407,7 +407,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
407 sband->n_channels = ath5k_setup_channels(ah, sband->channels, 407 sband->n_channels = ath5k_setup_channels(ah, sband->channels,
408 AR5K_MODE_11B, max_c); 408 AR5K_MODE_11B, max_c);
409 409
410 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband; 410 hw->wiphy->bands[NL80211_BAND_2GHZ] = sband;
411 count_c = sband->n_channels; 411 count_c = sband->n_channels;
412 max_c -= count_c; 412 max_c -= count_c;
413 } 413 }
@@ -415,9 +415,9 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
415 415
416 /* 5GHz band, A mode */ 416 /* 5GHz band, A mode */
417 if (test_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode)) { 417 if (test_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode)) {
418 sband = &ah->sbands[IEEE80211_BAND_5GHZ]; 418 sband = &ah->sbands[NL80211_BAND_5GHZ];
419 sband->band = IEEE80211_BAND_5GHZ; 419 sband->band = NL80211_BAND_5GHZ;
420 sband->bitrates = &ah->rates[IEEE80211_BAND_5GHZ][0]; 420 sband->bitrates = &ah->rates[NL80211_BAND_5GHZ][0];
421 421
422 memcpy(sband->bitrates, &ath5k_rates[4], 422 memcpy(sband->bitrates, &ath5k_rates[4],
423 sizeof(struct ieee80211_rate) * 8); 423 sizeof(struct ieee80211_rate) * 8);
@@ -427,7 +427,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
427 sband->n_channels = ath5k_setup_channels(ah, sband->channels, 427 sband->n_channels = ath5k_setup_channels(ah, sband->channels,
428 AR5K_MODE_11A, max_c); 428 AR5K_MODE_11A, max_c);
429 429
430 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband; 430 hw->wiphy->bands[NL80211_BAND_5GHZ] = sband;
431 } 431 }
432 ath5k_setup_rate_idx(ah, sband); 432 ath5k_setup_rate_idx(ah, sband);
433 433
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index 654a1e33f827..929d7ccc031c 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -1043,14 +1043,14 @@ ath5k_debug_dump_bands(struct ath5k_hw *ah)
1043 1043
1044 BUG_ON(!ah->sbands); 1044 BUG_ON(!ah->sbands);
1045 1045
1046 for (b = 0; b < IEEE80211_NUM_BANDS; b++) { 1046 for (b = 0; b < NUM_NL80211_BANDS; b++) {
1047 struct ieee80211_supported_band *band = &ah->sbands[b]; 1047 struct ieee80211_supported_band *band = &ah->sbands[b];
1048 char bname[6]; 1048 char bname[6];
1049 switch (band->band) { 1049 switch (band->band) {
1050 case IEEE80211_BAND_2GHZ: 1050 case NL80211_BAND_2GHZ:
1051 strcpy(bname, "2 GHz"); 1051 strcpy(bname, "2 GHz");
1052 break; 1052 break;
1053 case IEEE80211_BAND_5GHZ: 1053 case NL80211_BAND_5GHZ:
1054 strcpy(bname, "5 GHz"); 1054 strcpy(bname, "5 GHz");
1055 break; 1055 break;
1056 default: 1056 default:
diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c
index 803030fd17d3..6a2a16856763 100644
--- a/drivers/net/wireless/ath/ath5k/led.c
+++ b/drivers/net/wireless/ath/ath5k/led.c
@@ -77,7 +77,7 @@ static const struct pci_device_id ath5k_led_devices[] = {
77 /* HP Compaq CQ60-206US (ddreggors@jumptv.com) */ 77 /* HP Compaq CQ60-206US (ddreggors@jumptv.com) */
78 { ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137a), ATH_LED(3, 1) }, 78 { ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137a), ATH_LED(3, 1) },
79 /* HP Compaq C700 (nitrousnrg@gmail.com) */ 79 /* HP Compaq C700 (nitrousnrg@gmail.com) */
80 { ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137b), ATH_LED(3, 1) }, 80 { ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137b), ATH_LED(3, 0) },
81 /* LiteOn AR5BXB63 (magooz@salug.it) */ 81 /* LiteOn AR5BXB63 (magooz@salug.it) */
82 { ATH_SDEVICE(PCI_VENDOR_ID_ATHEROS, 0x3067), ATH_LED(3, 0) }, 82 { ATH_SDEVICE(PCI_VENDOR_ID_ATHEROS, 0x3067), ATH_LED(3, 0) },
83 /* IBM-specific AR5212 (all others) */ 83 /* IBM-specific AR5212 (all others) */
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index bf29da5e90da..fc47b70988b1 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -110,7 +110,7 @@ static const unsigned int ack_rates_high[] =
110 * bwmodes. 110 * bwmodes.
111 */ 111 */
112int 112int
113ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum ieee80211_band band, 113ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum nl80211_band band,
114 int len, struct ieee80211_rate *rate, bool shortpre) 114 int len, struct ieee80211_rate *rate, bool shortpre)
115{ 115{
116 int sifs, preamble, plcp_bits, sym_time; 116 int sifs, preamble, plcp_bits, sym_time;
@@ -221,7 +221,7 @@ ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
221 case AR5K_BWMODE_DEFAULT: 221 case AR5K_BWMODE_DEFAULT:
222 sifs = AR5K_INIT_SIFS_DEFAULT_BG; 222 sifs = AR5K_INIT_SIFS_DEFAULT_BG;
223 default: 223 default:
224 if (channel->band == IEEE80211_BAND_5GHZ) 224 if (channel->band == NL80211_BAND_5GHZ)
225 sifs = AR5K_INIT_SIFS_DEFAULT_A; 225 sifs = AR5K_INIT_SIFS_DEFAULT_A;
226 break; 226 break;
227 } 227 }
@@ -279,7 +279,7 @@ ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
279 struct ieee80211_rate *rate; 279 struct ieee80211_rate *rate;
280 unsigned int i; 280 unsigned int i;
281 /* 802.11g covers both OFDM and CCK */ 281 /* 802.11g covers both OFDM and CCK */
282 u8 band = IEEE80211_BAND_2GHZ; 282 u8 band = NL80211_BAND_2GHZ;
283 283
284 /* Write rate duration table */ 284 /* Write rate duration table */
285 for (i = 0; i < ah->sbands[band].n_bitrates; i++) { 285 for (i = 0; i < ah->sbands[band].n_bitrates; i++) {
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 0fce1c76638e..641b13a279e1 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -75,13 +75,13 @@
75/** 75/**
76 * ath5k_hw_radio_revision() - Get the PHY Chip revision 76 * ath5k_hw_radio_revision() - Get the PHY Chip revision
77 * @ah: The &struct ath5k_hw 77 * @ah: The &struct ath5k_hw
78 * @band: One of enum ieee80211_band 78 * @band: One of enum nl80211_band
79 * 79 *
80 * Returns the revision number of a 2GHz, 5GHz or single chip 80 * Returns the revision number of a 2GHz, 5GHz or single chip
81 * radio. 81 * radio.
82 */ 82 */
83u16 83u16
84ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band) 84ath5k_hw_radio_revision(struct ath5k_hw *ah, enum nl80211_band band)
85{ 85{
86 unsigned int i; 86 unsigned int i;
87 u32 srev; 87 u32 srev;
@@ -91,10 +91,10 @@ ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
91 * Set the radio chip access register 91 * Set the radio chip access register
92 */ 92 */
93 switch (band) { 93 switch (band) {
94 case IEEE80211_BAND_2GHZ: 94 case NL80211_BAND_2GHZ:
95 ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_2GHZ, AR5K_PHY(0)); 95 ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_2GHZ, AR5K_PHY(0));
96 break; 96 break;
97 case IEEE80211_BAND_5GHZ: 97 case NL80211_BAND_5GHZ:
98 ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0)); 98 ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0));
99 break; 99 break;
100 default: 100 default:
@@ -138,11 +138,11 @@ ath5k_channel_ok(struct ath5k_hw *ah, struct ieee80211_channel *channel)
138 u16 freq = channel->center_freq; 138 u16 freq = channel->center_freq;
139 139
140 /* Check if the channel is in our supported range */ 140 /* Check if the channel is in our supported range */
141 if (channel->band == IEEE80211_BAND_2GHZ) { 141 if (channel->band == NL80211_BAND_2GHZ) {
142 if ((freq >= ah->ah_capabilities.cap_range.range_2ghz_min) && 142 if ((freq >= ah->ah_capabilities.cap_range.range_2ghz_min) &&
143 (freq <= ah->ah_capabilities.cap_range.range_2ghz_max)) 143 (freq <= ah->ah_capabilities.cap_range.range_2ghz_max))
144 return true; 144 return true;
145 } else if (channel->band == IEEE80211_BAND_5GHZ) 145 } else if (channel->band == NL80211_BAND_5GHZ)
146 if ((freq >= ah->ah_capabilities.cap_range.range_5ghz_min) && 146 if ((freq >= ah->ah_capabilities.cap_range.range_5ghz_min) &&
147 (freq <= ah->ah_capabilities.cap_range.range_5ghz_max)) 147 (freq <= ah->ah_capabilities.cap_range.range_5ghz_max))
148 return true; 148 return true;
@@ -743,7 +743,7 @@ done:
743/** 743/**
744 * ath5k_hw_rfgain_init() - Write initial RF gain settings to hw 744 * ath5k_hw_rfgain_init() - Write initial RF gain settings to hw
745 * @ah: The &struct ath5k_hw 745 * @ah: The &struct ath5k_hw
746 * @band: One of enum ieee80211_band 746 * @band: One of enum nl80211_band
747 * 747 *
748 * Write initial RF gain table to set the RF sensitivity. 748 * Write initial RF gain table to set the RF sensitivity.
749 * 749 *
@@ -751,7 +751,7 @@ done:
751 * with Gain_F calibration 751 * with Gain_F calibration
752 */ 752 */
753static int 753static int
754ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band) 754ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum nl80211_band band)
755{ 755{
756 const struct ath5k_ini_rfgain *ath5k_rfg; 756 const struct ath5k_ini_rfgain *ath5k_rfg;
757 unsigned int i, size, index; 757 unsigned int i, size, index;
@@ -786,7 +786,7 @@ ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band)
786 return -EINVAL; 786 return -EINVAL;
787 } 787 }
788 788
789 index = (band == IEEE80211_BAND_2GHZ) ? 1 : 0; 789 index = (band == NL80211_BAND_2GHZ) ? 1 : 0;
790 790
791 for (i = 0; i < size; i++) { 791 for (i = 0; i < size; i++) {
792 AR5K_REG_WAIT(i); 792 AR5K_REG_WAIT(i);
@@ -917,7 +917,7 @@ ath5k_hw_rfregs_init(struct ath5k_hw *ah,
917 } 917 }
918 918
919 /* Set Output and Driver bias current (OB/DB) */ 919 /* Set Output and Driver bias current (OB/DB) */
920 if (channel->band == IEEE80211_BAND_2GHZ) { 920 if (channel->band == NL80211_BAND_2GHZ) {
921 921
922 if (channel->hw_value == AR5K_MODE_11B) 922 if (channel->hw_value == AR5K_MODE_11B)
923 ee_mode = AR5K_EEPROM_MODE_11B; 923 ee_mode = AR5K_EEPROM_MODE_11B;
@@ -944,7 +944,7 @@ ath5k_hw_rfregs_init(struct ath5k_hw *ah,
944 AR5K_RF_DB_2GHZ, true); 944 AR5K_RF_DB_2GHZ, true);
945 945
946 /* RF5111 always needs OB/DB for 5GHz, even if we use 2GHz */ 946 /* RF5111 always needs OB/DB for 5GHz, even if we use 2GHz */
947 } else if ((channel->band == IEEE80211_BAND_5GHZ) || 947 } else if ((channel->band == NL80211_BAND_5GHZ) ||
948 (ah->ah_radio == AR5K_RF5111)) { 948 (ah->ah_radio == AR5K_RF5111)) {
949 949
950 /* For 11a, Turbo and XR we need to choose 950 /* For 11a, Turbo and XR we need to choose
@@ -1145,7 +1145,7 @@ ath5k_hw_rfregs_init(struct ath5k_hw *ah,
1145 } 1145 }
1146 1146
1147 if (ah->ah_radio == AR5K_RF5413 && 1147 if (ah->ah_radio == AR5K_RF5413 &&
1148 channel->band == IEEE80211_BAND_2GHZ) { 1148 channel->band == NL80211_BAND_2GHZ) {
1149 1149
1150 ath5k_hw_rfb_op(ah, rf_regs, 1, AR5K_RF_DERBY_CHAN_SEL_MODE, 1150 ath5k_hw_rfb_op(ah, rf_regs, 1, AR5K_RF_DERBY_CHAN_SEL_MODE,
1151 true); 1151 true);
@@ -1270,7 +1270,7 @@ ath5k_hw_rf5111_channel(struct ath5k_hw *ah,
1270 */ 1270 */
1271 data0 = data1 = 0; 1271 data0 = data1 = 0;
1272 1272
1273 if (channel->band == IEEE80211_BAND_2GHZ) { 1273 if (channel->band == NL80211_BAND_2GHZ) {
1274 /* Map 2GHz channel to 5GHz Atheros channel ID */ 1274 /* Map 2GHz channel to 5GHz Atheros channel ID */
1275 ret = ath5k_hw_rf5111_chan2athchan( 1275 ret = ath5k_hw_rf5111_chan2athchan(
1276 ieee80211_frequency_to_channel(channel->center_freq), 1276 ieee80211_frequency_to_channel(channel->center_freq),
@@ -1446,7 +1446,7 @@ ath5k_hw_channel(struct ath5k_hw *ah,
1446 "channel frequency (%u MHz) out of supported " 1446 "channel frequency (%u MHz) out of supported "
1447 "band range\n", 1447 "band range\n",
1448 channel->center_freq); 1448 channel->center_freq);
1449 return -EINVAL; 1449 return -EINVAL;
1450 } 1450 }
1451 1451
1452 /* 1452 /*
@@ -1919,7 +1919,7 @@ ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
1919 /* Convert current frequency to fbin value (the same way channels 1919 /* Convert current frequency to fbin value (the same way channels
1920 * are stored on EEPROM, check out ath5k_eeprom_bin2freq) and scale 1920 * are stored on EEPROM, check out ath5k_eeprom_bin2freq) and scale
1921 * up by 2 so we can compare it later */ 1921 * up by 2 so we can compare it later */
1922 if (channel->band == IEEE80211_BAND_2GHZ) { 1922 if (channel->band == NL80211_BAND_2GHZ) {
1923 chan_fbin = (channel->center_freq - 2300) * 10; 1923 chan_fbin = (channel->center_freq - 2300) * 10;
1924 freq_band = AR5K_EEPROM_BAND_2GHZ; 1924 freq_band = AR5K_EEPROM_BAND_2GHZ;
1925 } else { 1925 } else {
@@ -1983,7 +1983,7 @@ ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
1983 symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz / 4; 1983 symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz / 4;
1984 break; 1984 break;
1985 default: 1985 default:
1986 if (channel->band == IEEE80211_BAND_5GHZ) { 1986 if (channel->band == NL80211_BAND_5GHZ) {
1987 /* Both sample_freq and chip_freq are 40MHz */ 1987 /* Both sample_freq and chip_freq are 40MHz */
1988 spur_delta_phase = (spur_offset << 17) / 25; 1988 spur_delta_phase = (spur_offset << 17) / 25;
1989 spur_freq_sigma_delta = 1989 spur_freq_sigma_delta =
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index ddaad712c59a..beda11ce34a7 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -559,7 +559,7 @@ ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
559int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time) 559int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
560{ 560{
561 struct ieee80211_channel *channel = ah->ah_current_channel; 561 struct ieee80211_channel *channel = ah->ah_current_channel;
562 enum ieee80211_band band; 562 enum nl80211_band band;
563 struct ieee80211_supported_band *sband; 563 struct ieee80211_supported_band *sband;
564 struct ieee80211_rate *rate; 564 struct ieee80211_rate *rate;
565 u32 ack_tx_time, eifs, eifs_clock, sifs, sifs_clock; 565 u32 ack_tx_time, eifs, eifs_clock, sifs, sifs_clock;
@@ -596,10 +596,10 @@ int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
596 * 596 *
597 * Also we have different lowest rate for 802.11a 597 * Also we have different lowest rate for 802.11a
598 */ 598 */
599 if (channel->band == IEEE80211_BAND_5GHZ) 599 if (channel->band == NL80211_BAND_5GHZ)
600 band = IEEE80211_BAND_5GHZ; 600 band = NL80211_BAND_5GHZ;
601 else 601 else
602 band = IEEE80211_BAND_2GHZ; 602 band = NL80211_BAND_2GHZ;
603 603
604 switch (ah->ah_bwmode) { 604 switch (ah->ah_bwmode) {
605 case AR5K_BWMODE_5MHZ: 605 case AR5K_BWMODE_5MHZ:
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 99e62f99a182..56d7925a0c2c 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -634,7 +634,7 @@ ath5k_hw_on_hold(struct ath5k_hw *ah)
634 ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU | 634 ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
635 AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA | 635 AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA |
636 AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI); 636 AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI);
637 usleep_range(2000, 2500); 637 usleep_range(2000, 2500);
638 } else { 638 } else {
639 ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU | 639 ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
640 AR5K_RESET_CTL_BASEBAND | bus_flags); 640 AR5K_RESET_CTL_BASEBAND | bus_flags);
@@ -699,7 +699,7 @@ ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
699 ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU | 699 ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
700 AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA | 700 AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA |
701 AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI); 701 AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI);
702 usleep_range(2000, 2500); 702 usleep_range(2000, 2500);
703 } else { 703 } else {
704 if (ath5k_get_bus_type(ah) == ATH_AHB) 704 if (ath5k_get_bus_type(ah) == ATH_AHB)
705 ret = ath5k_hw_wisoc_reset(ah, AR5K_RESET_CTL_PCU | 705 ret = ath5k_hw_wisoc_reset(ah, AR5K_RESET_CTL_PCU |
@@ -752,7 +752,7 @@ ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
752 clock = AR5K_PHY_PLL_RF5111; /*Zero*/ 752 clock = AR5K_PHY_PLL_RF5111; /*Zero*/
753 } 753 }
754 754
755 if (channel->band == IEEE80211_BAND_2GHZ) { 755 if (channel->band == NL80211_BAND_2GHZ) {
756 mode |= AR5K_PHY_MODE_FREQ_2GHZ; 756 mode |= AR5K_PHY_MODE_FREQ_2GHZ;
757 clock |= AR5K_PHY_PLL_44MHZ; 757 clock |= AR5K_PHY_PLL_44MHZ;
758 758
@@ -771,7 +771,7 @@ ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
771 else 771 else
772 mode |= AR5K_PHY_MODE_MOD_DYN; 772 mode |= AR5K_PHY_MODE_MOD_DYN;
773 } 773 }
774 } else if (channel->band == IEEE80211_BAND_5GHZ) { 774 } else if (channel->band == NL80211_BAND_5GHZ) {
775 mode |= (AR5K_PHY_MODE_FREQ_5GHZ | 775 mode |= (AR5K_PHY_MODE_FREQ_5GHZ |
776 AR5K_PHY_MODE_MOD_OFDM); 776 AR5K_PHY_MODE_MOD_OFDM);
777 777
@@ -906,7 +906,7 @@ ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
906 u32 data; 906 u32 data;
907 ath5k_hw_reg_write(ah, AR5K_PHY_CCKTXCTL_WORLD, 907 ath5k_hw_reg_write(ah, AR5K_PHY_CCKTXCTL_WORLD,
908 AR5K_PHY_CCKTXCTL); 908 AR5K_PHY_CCKTXCTL);
909 if (channel->band == IEEE80211_BAND_5GHZ) 909 if (channel->band == NL80211_BAND_5GHZ)
910 data = 0xffb81020; 910 data = 0xffb81020;
911 else 911 else
912 data = 0xffb80d20; 912 data = 0xffb80d20;
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 7f3f94fbf157..4e11ba06f089 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -34,7 +34,7 @@
34} 34}
35 35
36#define CHAN2G(_channel, _freq, _flags) { \ 36#define CHAN2G(_channel, _freq, _flags) { \
37 .band = IEEE80211_BAND_2GHZ, \ 37 .band = NL80211_BAND_2GHZ, \
38 .hw_value = (_channel), \ 38 .hw_value = (_channel), \
39 .center_freq = (_freq), \ 39 .center_freq = (_freq), \
40 .flags = (_flags), \ 40 .flags = (_flags), \
@@ -43,7 +43,7 @@
43} 43}
44 44
45#define CHAN5G(_channel, _flags) { \ 45#define CHAN5G(_channel, _flags) { \
46 .band = IEEE80211_BAND_5GHZ, \ 46 .band = NL80211_BAND_5GHZ, \
47 .hw_value = (_channel), \ 47 .hw_value = (_channel), \
48 .center_freq = 5000 + (5 * (_channel)), \ 48 .center_freq = 5000 + (5 * (_channel)), \
49 .flags = (_flags), \ 49 .flags = (_flags), \
@@ -2583,7 +2583,7 @@ void ath6kl_check_wow_status(struct ath6kl *ar)
2583} 2583}
2584#endif 2584#endif
2585 2585
2586static int ath6kl_set_htcap(struct ath6kl_vif *vif, enum ieee80211_band band, 2586static int ath6kl_set_htcap(struct ath6kl_vif *vif, enum nl80211_band band,
2587 bool ht_enable) 2587 bool ht_enable)
2588{ 2588{
2589 struct ath6kl_htcap *htcap = &vif->htcap[band]; 2589 struct ath6kl_htcap *htcap = &vif->htcap[band];
@@ -2594,7 +2594,7 @@ static int ath6kl_set_htcap(struct ath6kl_vif *vif, enum ieee80211_band band,
2594 if (ht_enable) { 2594 if (ht_enable) {
2595 /* Set default ht capabilities */ 2595 /* Set default ht capabilities */
2596 htcap->ht_enable = true; 2596 htcap->ht_enable = true;
2597 htcap->cap_info = (band == IEEE80211_BAND_2GHZ) ? 2597 htcap->cap_info = (band == NL80211_BAND_2GHZ) ?
2598 ath6kl_g_htcap : ath6kl_a_htcap; 2598 ath6kl_g_htcap : ath6kl_a_htcap;
2599 htcap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K; 2599 htcap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K;
2600 } else /* Disable ht */ 2600 } else /* Disable ht */
@@ -2609,7 +2609,7 @@ static int ath6kl_restore_htcap(struct ath6kl_vif *vif)
2609 struct wiphy *wiphy = vif->ar->wiphy; 2609 struct wiphy *wiphy = vif->ar->wiphy;
2610 int band, ret = 0; 2610 int band, ret = 0;
2611 2611
2612 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 2612 for (band = 0; band < NUM_NL80211_BANDS; band++) {
2613 if (!wiphy->bands[band]) 2613 if (!wiphy->bands[band])
2614 continue; 2614 continue;
2615 2615
@@ -3530,7 +3530,7 @@ static void ath6kl_cfg80211_reg_notify(struct wiphy *wiphy,
3530 struct regulatory_request *request) 3530 struct regulatory_request *request)
3531{ 3531{
3532 struct ath6kl *ar = wiphy_priv(wiphy); 3532 struct ath6kl *ar = wiphy_priv(wiphy);
3533 u32 rates[IEEE80211_NUM_BANDS]; 3533 u32 rates[NUM_NL80211_BANDS];
3534 int ret, i; 3534 int ret, i;
3535 3535
3536 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, 3536 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
@@ -3555,7 +3555,7 @@ static void ath6kl_cfg80211_reg_notify(struct wiphy *wiphy,
3555 * changed. 3555 * changed.
3556 */ 3556 */
3557 3557
3558 for (i = 0; i < IEEE80211_NUM_BANDS; i++) 3558 for (i = 0; i < NUM_NL80211_BANDS; i++)
3559 if (wiphy->bands[i]) 3559 if (wiphy->bands[i])
3560 rates[i] = (1 << wiphy->bands[i]->n_bitrates) - 1; 3560 rates[i] = (1 << wiphy->bands[i]->n_bitrates) - 1;
3561 3561
@@ -3791,8 +3791,8 @@ struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name,
3791 vif->listen_intvl_t = ATH6KL_DEFAULT_LISTEN_INTVAL; 3791 vif->listen_intvl_t = ATH6KL_DEFAULT_LISTEN_INTVAL;
3792 vif->bmiss_time_t = ATH6KL_DEFAULT_BMISS_TIME; 3792 vif->bmiss_time_t = ATH6KL_DEFAULT_BMISS_TIME;
3793 vif->bg_scan_period = 0; 3793 vif->bg_scan_period = 0;
3794 vif->htcap[IEEE80211_BAND_2GHZ].ht_enable = true; 3794 vif->htcap[NL80211_BAND_2GHZ].ht_enable = true;
3795 vif->htcap[IEEE80211_BAND_5GHZ].ht_enable = true; 3795 vif->htcap[NL80211_BAND_5GHZ].ht_enable = true;
3796 3796
3797 memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN); 3797 memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
3798 if (fw_vif_idx != 0) { 3798 if (fw_vif_idx != 0) {
@@ -3943,9 +3943,9 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
3943 wiphy->available_antennas_rx = ar->hw.rx_ant; 3943 wiphy->available_antennas_rx = ar->hw.rx_ant;
3944 3944
3945 if (band_2gig) 3945 if (band_2gig)
3946 wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz; 3946 wiphy->bands[NL80211_BAND_2GHZ] = &ath6kl_band_2ghz;
3947 if (band_5gig) 3947 if (band_5gig)
3948 wiphy->bands[IEEE80211_BAND_5GHZ] = &ath6kl_band_5ghz; 3948 wiphy->bands[NL80211_BAND_5GHZ] = &ath6kl_band_5ghz;
3949 3949
3950 wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; 3950 wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
3951 3951
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
index 5f3acfe6015e..713a571a27ce 100644
--- a/drivers/net/wireless/ath/ath6kl/core.h
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -623,7 +623,7 @@ struct ath6kl_vif {
623 struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1]; 623 struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1];
624 struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1]; 624 struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1];
625 struct aggr_info *aggr_cntxt; 625 struct aggr_info *aggr_cntxt;
626 struct ath6kl_htcap htcap[IEEE80211_NUM_BANDS]; 626 struct ath6kl_htcap htcap[NUM_NL80211_BANDS];
627 627
628 struct timer_list disconnect_timer; 628 struct timer_list disconnect_timer;
629 struct timer_list sched_scan_timer; 629 struct timer_list sched_scan_timer;
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index a5e1de75a4a3..631c3a0c572b 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -1584,6 +1584,11 @@ static int ath6kl_wmi_txe_notify_event_rx(struct wmi *wmi, u8 *datap, int len,
1584 if (len < sizeof(*ev)) 1584 if (len < sizeof(*ev))
1585 return -EINVAL; 1585 return -EINVAL;
1586 1586
1587 if (vif->nw_type != INFRA_NETWORK ||
1588 !test_bit(ATH6KL_FW_CAPABILITY_TX_ERR_NOTIFY,
1589 vif->ar->fw_capabilities))
1590 return -EOPNOTSUPP;
1591
1587 if (vif->sme_state != SME_CONNECTED) 1592 if (vif->sme_state != SME_CONNECTED)
1588 return -ENOTCONN; 1593 return -ENOTCONN;
1589 1594
@@ -2043,7 +2048,7 @@ int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
2043 sc->no_cck = cpu_to_le32(no_cck); 2048 sc->no_cck = cpu_to_le32(no_cck);
2044 sc->num_ch = num_chan; 2049 sc->num_ch = num_chan;
2045 2050
2046 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 2051 for (band = 0; band < NUM_NL80211_BANDS; band++) {
2047 sband = ar->wiphy->bands[band]; 2052 sband = ar->wiphy->bands[band];
2048 2053
2049 if (!sband) 2054 if (!sband)
@@ -2765,10 +2770,10 @@ static int ath6kl_set_bitrate_mask64(struct wmi *wmi, u8 if_idx,
2765 memset(&ratemask, 0, sizeof(ratemask)); 2770 memset(&ratemask, 0, sizeof(ratemask));
2766 2771
2767 /* only check 2.4 and 5 GHz bands, skip the rest */ 2772 /* only check 2.4 and 5 GHz bands, skip the rest */
2768 for (band = 0; band <= IEEE80211_BAND_5GHZ; band++) { 2773 for (band = 0; band <= NL80211_BAND_5GHZ; band++) {
2769 /* copy legacy rate mask */ 2774 /* copy legacy rate mask */
2770 ratemask[band] = mask->control[band].legacy; 2775 ratemask[band] = mask->control[band].legacy;
2771 if (band == IEEE80211_BAND_5GHZ) 2776 if (band == NL80211_BAND_5GHZ)
2772 ratemask[band] = 2777 ratemask[band] =
2773 mask->control[band].legacy << 4; 2778 mask->control[band].legacy << 4;
2774 2779
@@ -2794,9 +2799,9 @@ static int ath6kl_set_bitrate_mask64(struct wmi *wmi, u8 if_idx,
2794 if (mode == WMI_RATES_MODE_11A || 2799 if (mode == WMI_RATES_MODE_11A ||
2795 mode == WMI_RATES_MODE_11A_HT20 || 2800 mode == WMI_RATES_MODE_11A_HT20 ||
2796 mode == WMI_RATES_MODE_11A_HT40) 2801 mode == WMI_RATES_MODE_11A_HT40)
2797 band = IEEE80211_BAND_5GHZ; 2802 band = NL80211_BAND_5GHZ;
2798 else 2803 else
2799 band = IEEE80211_BAND_2GHZ; 2804 band = NL80211_BAND_2GHZ;
2800 cmd->ratemask[mode] = cpu_to_le64(ratemask[band]); 2805 cmd->ratemask[mode] = cpu_to_le64(ratemask[band]);
2801 } 2806 }
2802 2807
@@ -2817,10 +2822,10 @@ static int ath6kl_set_bitrate_mask32(struct wmi *wmi, u8 if_idx,
2817 memset(&ratemask, 0, sizeof(ratemask)); 2822 memset(&ratemask, 0, sizeof(ratemask));
2818 2823
2819 /* only check 2.4 and 5 GHz bands, skip the rest */ 2824 /* only check 2.4 and 5 GHz bands, skip the rest */
2820 for (band = 0; band <= IEEE80211_BAND_5GHZ; band++) { 2825 for (band = 0; band <= NL80211_BAND_5GHZ; band++) {
2821 /* copy legacy rate mask */ 2826 /* copy legacy rate mask */
2822 ratemask[band] = mask->control[band].legacy; 2827 ratemask[band] = mask->control[band].legacy;
2823 if (band == IEEE80211_BAND_5GHZ) 2828 if (band == NL80211_BAND_5GHZ)
2824 ratemask[band] = 2829 ratemask[band] =
2825 mask->control[band].legacy << 4; 2830 mask->control[band].legacy << 4;
2826 2831
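
Both set_bitrate_mask variants copy the nl80211 legacy bitmap per band and shift the 5 GHz copy left by four bits, presumably so the OFDM rates line up with a firmware rate table whose first four slots are the 2.4 GHz-only CCK rates (that table ordering is an assumption here). A standalone illustration of the shift:

#include <stdio.h>

/* nl80211 legacy bitmaps: on 2.4 GHz bit 0 is CCK 1 Mbps, on 5 GHz bit 0 is
 * OFDM 6 Mbps.  If the firmware uses one combined table with the four CCK
 * rates first (an assumption), the 5 GHz mask must be shifted by 4. */
int main(void)
{
	unsigned legacy_2g = 0x0ff0;	/* OFDM 6..54 Mbps on 2.4 GHz */
	unsigned legacy_5g = 0x00ff;	/* OFDM 6..54 Mbps on 5 GHz */

	unsigned fw_mask_2g = legacy_2g;	/* already aligned */
	unsigned fw_mask_5g = legacy_5g << 4;	/* skip CCK slots 0..3 */

	printf("2G fw mask 0x%04x, 5G fw mask 0x%04x\n",
	       fw_mask_2g, fw_mask_5g);
	return 0;
}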
@@ -2844,9 +2849,9 @@ static int ath6kl_set_bitrate_mask32(struct wmi *wmi, u8 if_idx,
2844 if (mode == WMI_RATES_MODE_11A || 2849 if (mode == WMI_RATES_MODE_11A ||
2845 mode == WMI_RATES_MODE_11A_HT20 || 2850 mode == WMI_RATES_MODE_11A_HT20 ||
2846 mode == WMI_RATES_MODE_11A_HT40) 2851 mode == WMI_RATES_MODE_11A_HT40)
2847 band = IEEE80211_BAND_5GHZ; 2852 band = NL80211_BAND_5GHZ;
2848 else 2853 else
2849 band = IEEE80211_BAND_2GHZ; 2854 band = NL80211_BAND_2GHZ;
2850 cmd->ratemask[mode] = cpu_to_le32(ratemask[band]); 2855 cmd->ratemask[mode] = cpu_to_le32(ratemask[band]);
2851 } 2856 }
2852 2857
@@ -3169,7 +3174,7 @@ int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx,
3169} 3174}
3170 3175
3171int ath6kl_wmi_set_htcap_cmd(struct wmi *wmi, u8 if_idx, 3176int ath6kl_wmi_set_htcap_cmd(struct wmi *wmi, u8 if_idx,
3172 enum ieee80211_band band, 3177 enum nl80211_band band,
3173 struct ath6kl_htcap *htcap) 3178 struct ath6kl_htcap *htcap)
3174{ 3179{
3175 struct sk_buff *skb; 3180 struct sk_buff *skb;
@@ -3182,7 +3187,7 @@ int ath6kl_wmi_set_htcap_cmd(struct wmi *wmi, u8 if_idx,
3182 cmd = (struct wmi_set_htcap_cmd *) skb->data; 3187 cmd = (struct wmi_set_htcap_cmd *) skb->data;
3183 3188
3184 /* 3189 /*
3185 * NOTE: Band in firmware matches enum ieee80211_band, it is unlikely 3190 * NOTE: Band in firmware matches enum nl80211_band, it is unlikely
3186 * this will be changed in firmware. If at all there is any change in 3191 * this will be changed in firmware. If at all there is any change in
3187 * band value, the host needs to be fixed. 3192 * band value, the host needs to be fixed.
3188 */ 3193 */
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
index 05d25a94c781..3af464a73b58 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.h
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -2628,7 +2628,7 @@ int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, u8 if_idx, enum wmi_txop_cfg cfg);
2628int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx, 2628int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx,
2629 u8 keep_alive_intvl); 2629 u8 keep_alive_intvl);
2630int ath6kl_wmi_set_htcap_cmd(struct wmi *wmi, u8 if_idx, 2630int ath6kl_wmi_set_htcap_cmd(struct wmi *wmi, u8 if_idx,
2631 enum ieee80211_band band, 2631 enum nl80211_band band,
2632 struct ath6kl_htcap *htcap); 2632 struct ath6kl_htcap *htcap);
2633int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len); 2633int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len);
2634 2634
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index c38399bc9aa9..c07866a2fdf9 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -331,7 +331,7 @@ static const u32 ar9300_2p2_baseband_postamble[][5] = {
331 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, 331 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
332 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, 332 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
333 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, 333 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
334 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222}, 334 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220},
335 {0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27}, 335 {0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
336 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012}, 336 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
337 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, 337 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -351,7 +351,7 @@ static const u32 ar9300_2p2_baseband_postamble[][5] = {
351 {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150}, 351 {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
352 {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110}, 352 {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
353 {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222}, 353 {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
354 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18}, 354 {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
355 {0x0000a2d0, 0x00041983, 0x00041983, 0x00041981, 0x00041982}, 355 {0x0000a2d0, 0x00041983, 0x00041983, 0x00041981, 0x00041982},
356 {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b}, 356 {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
357 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 357 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 0c391997a2f7..518e649ecff3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -1203,12 +1203,12 @@ static void ar9003_hw_tx_iq_cal_reload(struct ath_hw *ah)
1203static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g) 1203static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
1204{ 1204{
1205 int offset[8] = {0}, total = 0, test; 1205 int offset[8] = {0}, total = 0, test;
1206 int agc_out, i, peak_detect_threshold; 1206 int agc_out, i, peak_detect_threshold = 0;
1207 1207
1208 if (AR_SREV_9550(ah) || AR_SREV_9531(ah)) 1208 if (AR_SREV_9550(ah) || AR_SREV_9531(ah))
1209 peak_detect_threshold = 8; 1209 peak_detect_threshold = 8;
1210 else 1210 else if (AR_SREV_9561(ah))
1211 peak_detect_threshold = 0; 1211 peak_detect_threshold = 11;
1212 1212
1213 /* 1213 /*
1214 * Turn off LNA/SW. 1214 * Turn off LNA/SW.
@@ -1249,17 +1249,14 @@ static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
1249 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain), 1249 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
1250 AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR, 0x0); 1250 AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR, 0x0);
1251 1251
1252 if (AR_SREV_9003_PCOEM(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah) || 1252 if (is_2g)
1253 AR_SREV_9561(ah)) { 1253 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
1254 if (is_2g) 1254 AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR,
1255 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain), 1255 peak_detect_threshold);
1256 AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR, 1256 else
1257 peak_detect_threshold); 1257 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
1258 else 1258 AR_PHY_65NM_RXRF_AGC_AGC5G_DBDAC_OVR,
1259 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain), 1259 peak_detect_threshold);
1260 AR_PHY_65NM_RXRF_AGC_AGC5G_DBDAC_OVR,
1261 peak_detect_threshold);
1262 }
1263 1260
1264 for (i = 6; i > 0; i--) { 1261 for (i = 6; i > 0; i--) {
1265 offset[i] = BIT(i - 1); 1262 offset[i] = BIT(i - 1);
@@ -1311,9 +1308,6 @@ static void ar9003_hw_do_pcoem_manual_peak_cal(struct ath_hw *ah,
1311 struct ath9k_hw_cal_data *caldata = ah->caldata; 1308 struct ath9k_hw_cal_data *caldata = ah->caldata;
1312 int i; 1309 int i;
1313 1310
1314 if (!AR_SREV_9462(ah) && !AR_SREV_9565(ah) && !AR_SREV_9485(ah))
1315 return;
1316
1317 if ((ah->caps.hw_caps & ATH9K_HW_CAP_RTT) && !run_rtt_cal) 1311 if ((ah->caps.hw_caps & ATH9K_HW_CAP_RTT) && !run_rtt_cal)
1318 return; 1312 return;
1319 1313
@@ -1641,14 +1635,12 @@ static bool ar9003_hw_init_cal_soc(struct ath_hw *ah,
1641 1635
1642skip_tx_iqcal: 1636skip_tx_iqcal:
1643 if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) { 1637 if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) {
1644 if (AR_SREV_9330_11(ah) || AR_SREV_9531(ah) || AR_SREV_9550(ah) || 1638 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
1645 AR_SREV_9561(ah)) { 1639 if (!(ah->rxchainmask & (1 << i)))
1646 for (i = 0; i < AR9300_MAX_CHAINS; i++) { 1640 continue;
1647 if (!(ah->rxchainmask & (1 << i))) 1641
1648 continue; 1642 ar9003_hw_manual_peak_cal(ah, i,
1649 ar9003_hw_manual_peak_cal(ah, i, 1643 IS_CHAN_2GHZ(chan));
1650 IS_CHAN_2GHZ(chan));
1651 }
1652 } 1644 }
1653 1645
1654 /* 1646 /*
@@ -1709,7 +1701,7 @@ void ar9003_hw_attach_calib_ops(struct ath_hw *ah)
1709 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah); 1701 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
1710 struct ath_hw_ops *ops = ath9k_hw_ops(ah); 1702 struct ath_hw_ops *ops = ath9k_hw_ops(ah);
1711 1703
1712 if (AR_SREV_9485(ah) || AR_SREV_9462(ah) || AR_SREV_9565(ah)) 1704 if (AR_SREV_9003_PCOEM(ah))
1713 priv_ops->init_cal = ar9003_hw_init_cal_pcoem; 1705 priv_ops->init_cal = ar9003_hw_init_cal_pcoem;
1714 else 1706 else
1715 priv_ops->init_cal = ar9003_hw_init_cal_soc; 1707 priv_ops->init_cal = ar9003_hw_init_cal_soc;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 54ed2f72d35e..f68098284c43 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3590,8 +3590,8 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
3590 else 3590 else
3591 gpio = AR9300_EXT_LNA_CTL_GPIO_AR9485; 3591 gpio = AR9300_EXT_LNA_CTL_GPIO_AR9485;
3592 3592
3593 ath9k_hw_cfg_output(ah, gpio, 3593 ath9k_hw_gpio_request_out(ah, gpio, NULL,
3594 AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED); 3594 AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED);
3595 } 3595 }
3596 3596
3597 value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz); 3597 value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz);
@@ -4097,16 +4097,16 @@ static void ar9003_hw_thermometer_apply(struct ath_hw *ah)
4097 REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX4, 4097 REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX4,
4098 AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR, therm_on); 4098 AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR, therm_on);
4099 4099
4100 therm_on = (thermometer < 0) ? 0 : (thermometer == 0); 4100 therm_on = thermometer == 0;
4101 REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_RXTX4, 4101 REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_RXTX4,
4102 AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on); 4102 AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on);
4103 if (pCap->chip_chainmask & BIT(1)) { 4103 if (pCap->chip_chainmask & BIT(1)) {
4104 therm_on = (thermometer < 0) ? 0 : (thermometer == 1); 4104 therm_on = thermometer == 1;
4105 REG_RMW_FIELD(ah, AR_PHY_65NM_CH1_RXTX4, 4105 REG_RMW_FIELD(ah, AR_PHY_65NM_CH1_RXTX4,
4106 AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on); 4106 AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on);
4107 } 4107 }
4108 if (pCap->chip_chainmask & BIT(2)) { 4108 if (pCap->chip_chainmask & BIT(2)) {
4109 therm_on = (thermometer < 0) ? 0 : (thermometer == 2); 4109 therm_on = thermometer == 2;
4110 REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX4, 4110 REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX4,
4111 AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on); 4111 AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on);
4112 } 4112 }
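The ar9003_eeprom.c thermometer hunks above replace "(thermometer < 0) ? 0 : (thermometer == N)" with the plain comparison. The two forms are equivalent: a negative thermometer value can never equal the non-negative constant it is compared against, so the comparison is already 0 and the guard is redundant. A small stand-alone check of that equivalence, purely illustrative and not taken from the patch:

        #include <assert.h>

        static int therm_on_old(int t, int n) { return (t < 0) ? 0 : (t == n); }
        static int therm_on_new(int t, int n) { return t == n; }

        int main(void)
        {
                int t, n;

                /* Both forms agree for every thermometer value and index. */
                for (t = -3; t <= 3; t++)
                        for (n = 0; n <= 2; n++)
                                assert(therm_on_old(t, n) == therm_on_new(t, n));
                return 0;
        }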
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
index af5ee416a560..0fe9c8378249 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
@@ -427,21 +427,34 @@ static void ar9003_mci_observation_set_up(struct ath_hw *ah)
427 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; 427 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
428 428
429 if (mci->config & ATH_MCI_CONFIG_MCI_OBS_MCI) { 429 if (mci->config & ATH_MCI_CONFIG_MCI_OBS_MCI) {
430 ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_DATA); 430 ath9k_hw_gpio_request_out(ah, 3, NULL,
431 ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_CLK); 431 AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_DATA);
432 ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA); 432 ath9k_hw_gpio_request_out(ah, 2, NULL,
433 ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK); 433 AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_CLK);
434 ath9k_hw_gpio_request_out(ah, 1, NULL,
435 AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA);
436 ath9k_hw_gpio_request_out(ah, 0, NULL,
437 AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK);
434 } else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_TXRX) { 438 } else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_TXRX) {
435 ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_WL_IN_TX); 439 ath9k_hw_gpio_request_out(ah, 3, NULL,
436 ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_WL_IN_RX); 440 AR_GPIO_OUTPUT_MUX_AS_WL_IN_TX);
437 ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX); 441 ath9k_hw_gpio_request_out(ah, 2, NULL,
438 ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX); 442 AR_GPIO_OUTPUT_MUX_AS_WL_IN_RX);
439 ath9k_hw_cfg_output(ah, 5, AR_GPIO_OUTPUT_MUX_AS_OUTPUT); 443 ath9k_hw_gpio_request_out(ah, 1, NULL,
444 AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX);
445 ath9k_hw_gpio_request_out(ah, 0, NULL,
446 AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX);
447 ath9k_hw_gpio_request_out(ah, 5, NULL,
448 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
440 } else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_BT) { 449 } else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_BT) {
441 ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX); 450 ath9k_hw_gpio_request_out(ah, 3, NULL,
442 ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX); 451 AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX);
443 ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA); 452 ath9k_hw_gpio_request_out(ah, 2, NULL,
444 ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK); 453 AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX);
454 ath9k_hw_gpio_request_out(ah, 1, NULL,
455 AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA);
456 ath9k_hw_gpio_request_out(ah, 0, NULL,
457 AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK);
445 } else 458 } else
446 return; 459 return;
447 460
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 06c1ca6e8290..be14a8e01916 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -1337,11 +1337,11 @@ skip_ws_det:
1337 chan->channel, 1337 chan->channel,
1338 aniState->mrcCCK ? "on" : "off", 1338 aniState->mrcCCK ? "on" : "off",
1339 is_on ? "on" : "off"); 1339 is_on ? "on" : "off");
1340 if (is_on) 1340 if (is_on)
1341 ah->stats.ast_ani_ccklow++; 1341 ah->stats.ast_ani_ccklow++;
1342 else 1342 else
1343 ah->stats.ast_ani_cckhigh++; 1343 ah->stats.ast_ani_cckhigh++;
1344 aniState->mrcCCK = is_on; 1344 aniState->mrcCCK = is_on;
1345 } 1345 }
1346 break; 1346 break;
1347 } 1347 }
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
index 2c42ff05efa3..29479afbc4f1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
@@ -40,7 +40,7 @@ static const u32 ar9331_1p1_baseband_postamble[][5] = {
40 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, 40 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
41 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, 41 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
42 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00003221, 0x00003221}, 42 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00003221, 0x00003221},
43 {0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946222, 0xcf946222}, 43 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220},
44 {0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324}, 44 {0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324},
45 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010}, 45 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
46 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, 46 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -59,7 +59,7 @@ static const u32 ar9331_1p1_baseband_postamble[][5] = {
59 {0x0000a284, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 59 {0x0000a284, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
60 {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 60 {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
61 {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 61 {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
62 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18}, 62 {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
63 {0x0000a2d0, 0x00071982, 0x00071982, 0x00071982, 0x00071982}, 63 {0x0000a2d0, 0x00071982, 0x00071982, 0x00071982, 0x00071982},
64 {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a}, 64 {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
65 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 65 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
index 2154efcd3900..c4a6ffa55e8c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
@@ -345,7 +345,7 @@ static const u32 ar9331_1p2_baseband_postamble[][5] = {
345 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, 345 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
346 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, 346 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
347 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00003221, 0x00003221}, 347 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00003221, 0x00003221},
348 {0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946222, 0xcf946222}, 348 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220},
349 {0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324}, 349 {0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324},
350 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010}, 350 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
351 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, 351 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -364,7 +364,7 @@ static const u32 ar9331_1p2_baseband_postamble[][5] = {
364 {0x0000a284, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 364 {0x0000a284, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
365 {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 365 {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
366 {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 366 {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
367 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18}, 367 {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
368 {0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071981}, 368 {0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071981},
369 {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a}, 369 {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
370 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 370 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
index b995ffe88b33..2eb163fc1c18 100644
--- a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
@@ -245,7 +245,7 @@ static const u32 ar9340_1p0_baseband_postamble[][5] = {
245 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, 245 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
246 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, 246 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
247 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, 247 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
248 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222}, 248 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220},
249 {0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27}, 249 {0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
250 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012}, 250 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
251 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, 251 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -265,7 +265,7 @@ static const u32 ar9340_1p0_baseband_postamble[][5] = {
265 {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150}, 265 {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
266 {0x0000a288, 0x00000220, 0x00000220, 0x00000110, 0x00000110}, 266 {0x0000a288, 0x00000220, 0x00000220, 0x00000110, 0x00000110},
267 {0x0000a28c, 0x00011111, 0x00011111, 0x00022222, 0x00022222}, 267 {0x0000a28c, 0x00011111, 0x00011111, 0x00022222, 0x00022222},
268 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18}, 268 {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
269 {0x0000a2d0, 0x00041983, 0x00041983, 0x00041982, 0x00041982}, 269 {0x0000a2d0, 0x00041983, 0x00041983, 0x00041982, 0x00041982},
270 {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a}, 270 {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
271 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 271 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
index 1b6b4d0cfa97..b00dd649453d 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
@@ -59,7 +59,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
59 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, 59 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
60 {0x00009e20, 0x000003a5, 0x000003a5, 0x000003a5, 0x000003a5}, 60 {0x00009e20, 0x000003a5, 0x000003a5, 0x000003a5, 0x000003a5},
61 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, 61 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
62 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282}, 62 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c780, 0xcfd5c280},
63 {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27}, 63 {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
64 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012}, 64 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
65 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, 65 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -79,7 +79,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
79 {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150}, 79 {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
80 {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110}, 80 {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
81 {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222}, 81 {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
82 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18}, 82 {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
83 {0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982}, 83 {0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
84 {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b}, 84 {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
85 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 85 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h
index dc3adda46e8b..0f8745ec73b1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h
@@ -239,7 +239,7 @@ static const u32 ar9462_2p1_baseband_postamble[][5] = {
239 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, 239 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
240 {0x00009e20, 0x000003a5, 0x000003a5, 0x000003a5, 0x000003a5}, 240 {0x00009e20, 0x000003a5, 0x000003a5, 0x000003a5, 0x000003a5},
241 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, 241 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
242 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282}, 242 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c780, 0xcfd5c280},
243 {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27}, 243 {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
244 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012}, 244 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
245 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, 245 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -259,7 +259,7 @@ static const u32 ar9462_2p1_baseband_postamble[][5] = {
259 {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150}, 259 {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
260 {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110}, 260 {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
261 {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222}, 261 {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
262 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18}, 262 {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
263 {0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982}, 263 {0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
264 {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b}, 264 {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
265 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 265 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
index ce83ce47a1ca..bdf6f107f6f1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
@@ -1026,7 +1026,7 @@ static const u32 ar9485_1_1_baseband_postamble[][5] = {
1026 {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec80d2e, 0x7ec80d2e}, 1026 {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec80d2e, 0x7ec80d2e},
1027 {0x00009e14, 0x31395d53, 0x31396053, 0x312e6053, 0x312e5d53}, 1027 {0x00009e14, 0x31395d53, 0x31396053, 0x312e6053, 0x312e5d53},
1028 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, 1028 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
1029 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222}, 1029 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220},
1030 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010}, 1030 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
1031 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, 1031 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
1032 {0x0000a204, 0x01303fc0, 0x01303fc4, 0x01303fc4, 0x01303fc0}, 1032 {0x0000a204, 0x01303fc0, 0x01303fc4, 0x01303fc4, 0x01303fc0},
@@ -1044,7 +1044,7 @@ static const u32 ar9485_1_1_baseband_postamble[][5] = {
1044 {0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0}, 1044 {0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0},
1045 {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 1045 {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1046 {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 1046 {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1047 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18}, 1047 {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
1048 {0x0000a2d0, 0x00071981, 0x00071981, 0x00071982, 0x00071982}, 1048 {0x0000a2d0, 0x00071981, 0x00071981, 0x00071982, 0x00071982},
1049 {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a}, 1049 {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
1050 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 1050 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar953x_initvals.h b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
index c0b90daa3e3d..924ae6bde7f1 100644
--- a/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
@@ -988,7 +988,7 @@ static const u32 qca953x_2p0_baseband_postamble[][5] = {
988 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, 988 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
989 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, 989 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
990 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, 990 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
991 {0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcf946222, 0xcf946222}, 991 {0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcf946220, 0xcf946220},
992 {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27}, 992 {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
993 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012}, 993 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
994 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, 994 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -1008,7 +1008,7 @@ static const u32 qca953x_2p0_baseband_postamble[][5] = {
1008 {0x0000a284, 0x00000000, 0x00000000, 0x00000010, 0x00000010}, 1008 {0x0000a284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
1009 {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110}, 1009 {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
1010 {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222}, 1010 {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
1011 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18}, 1011 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18},
1012 {0x0000a2cc, 0x18c50033, 0x18c43433, 0x18c41033, 0x18c44c33}, 1012 {0x0000a2cc, 0x18c50033, 0x18c43433, 0x18c41033, 0x18c44c33},
1013 {0x0000a2d0, 0x00041982, 0x00041982, 0x00041982, 0x00041982}, 1013 {0x0000a2d0, 0x00041982, 0x00041982, 0x00041982, 0x00041982},
1014 {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b}, 1014 {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
diff --git a/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
index 148562addd38..67edf344b427 100644
--- a/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
@@ -83,7 +83,7 @@ static const u32 ar955x_1p0_baseband_postamble[][5] = {
83 {0x0000a284, 0x00000000, 0x00000000, 0x00000010, 0x00000010}, 83 {0x0000a284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
84 {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110}, 84 {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
85 {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222}, 85 {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
86 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18}, 86 {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
87 {0x0000a2cc, 0x18c50033, 0x18c43433, 0x18c41033, 0x18c44c33}, 87 {0x0000a2cc, 0x18c50033, 0x18c43433, 0x18c41033, 0x18c44c33},
88 {0x0000a2d0, 0x00041982, 0x00041982, 0x00041982, 0x00041982}, 88 {0x0000a2d0, 0x00041982, 0x00041982, 0x00041982, 0x00041982},
89 {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b}, 89 {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
diff --git a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
index 10d4a6cb1c3b..35c1bbb2fa8a 100644
--- a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
@@ -347,7 +347,7 @@ static const u32 ar9565_1p0_baseband_postamble[][5] = {
347 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, 347 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
348 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003a4, 0x000003a4}, 348 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003a4, 0x000003a4},
349 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, 349 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
350 {0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946220, 0xcf946220}, 350 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220},
351 {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27}, 351 {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
352 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012}, 352 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
353 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, 353 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
diff --git a/drivers/net/wireless/ath/ath9k/ar956x_initvals.h b/drivers/net/wireless/ath/ath9k/ar956x_initvals.h
index c3a47eaaf0c0..db051071c676 100644
--- a/drivers/net/wireless/ath/ath9k/ar956x_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar956x_initvals.h
@@ -220,7 +220,7 @@ static const u32 qca956x_1p0_baseband_postamble[][5] = {
220 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, 220 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
221 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003a6, 0x000003a6}, 221 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003a6, 0x000003a6},
222 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, 222 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
223 {0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcf946222, 0xcf946222}, 223 {0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcf946220, 0xcf946220},
224 {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27}, 224 {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
225 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012}, 225 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
226 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, 226 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
index 5d4629f96c15..f4c9befb3949 100644
--- a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
@@ -1290,7 +1290,7 @@ static const u32 ar9580_1p0_baseband_postamble[][5] = {
1290 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, 1290 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
1291 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, 1291 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
1292 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, 1292 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
1293 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222}, 1293 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220},
1294 {0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27}, 1294 {0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
1295 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012}, 1295 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
1296 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, 1296 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -1310,7 +1310,7 @@ static const u32 ar9580_1p0_baseband_postamble[][5] = {
1310 {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150}, 1310 {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
1311 {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110}, 1311 {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
1312 {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222}, 1312 {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
1313 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18}, 1313 {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
1314 {0x0000a2d0, 0x00041983, 0x00041983, 0x00041981, 0x00041982}, 1314 {0x0000a2d0, 0x00041983, 0x00041983, 0x00041981, 0x00041982},
1315 {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b}, 1315 {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
1316 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 1316 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 5294595da5a7..93b3793cce2f 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -813,7 +813,6 @@ static inline int ath9k_dump_btcoex(struct ath_softc *sc, u8 *buf, u32 size)
813#ifdef CONFIG_MAC80211_LEDS 813#ifdef CONFIG_MAC80211_LEDS
814void ath_init_leds(struct ath_softc *sc); 814void ath_init_leds(struct ath_softc *sc);
815void ath_deinit_leds(struct ath_softc *sc); 815void ath_deinit_leds(struct ath_softc *sc);
816void ath_fill_led_pin(struct ath_softc *sc);
817#else 816#else
818static inline void ath_init_leds(struct ath_softc *sc) 817static inline void ath_init_leds(struct ath_softc *sc)
819{ 818{
@@ -822,9 +821,6 @@ static inline void ath_init_leds(struct ath_softc *sc)
822static inline void ath_deinit_leds(struct ath_softc *sc) 821static inline void ath_deinit_leds(struct ath_softc *sc)
823{ 822{
824} 823}
825static inline void ath_fill_led_pin(struct ath_softc *sc)
826{
827}
828#endif 824#endif
829 825
830/************************/ 826/************************/
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index 5a084d94ed90..618c9df35fc1 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -15,6 +15,8 @@
15 */ 15 */
16 16
17#include <linux/export.h> 17#include <linux/export.h>
18#include <linux/types.h>
19#include <linux/ath9k_platform.h>
18#include "hw.h" 20#include "hw.h"
19 21
20enum ath_bt_mode { 22enum ath_bt_mode {
@@ -34,6 +36,8 @@ struct ath_btcoex_config {
34 u8 bt_priority_time; 36 u8 bt_priority_time;
35 u8 bt_first_slot_time; 37 u8 bt_first_slot_time;
36 bool bt_hold_rx_clear; 38 bool bt_hold_rx_clear;
39 u8 wl_active_time;
40 u8 wl_qc_time;
37}; 41};
38 42
39static const u32 ar9003_wlan_weights[ATH_BTCOEX_STOMP_MAX] 43static const u32 ar9003_wlan_weights[ATH_BTCOEX_STOMP_MAX]
@@ -65,31 +69,71 @@ void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
65 .bt_priority_time = 2, 69 .bt_priority_time = 2,
66 .bt_first_slot_time = 5, 70 .bt_first_slot_time = 5,
67 .bt_hold_rx_clear = true, 71 .bt_hold_rx_clear = true,
72 .wl_active_time = 0x20,
73 .wl_qc_time = 0x20,
68 }; 74 };
69 bool rxclear_polarity = ath_bt_config.bt_rxclear_polarity; 75 bool rxclear_polarity = ath_bt_config.bt_rxclear_polarity;
76 u8 time_extend = ath_bt_config.bt_time_extend;
77 u8 first_slot_time = ath_bt_config.bt_first_slot_time;
70 78
71 if (AR_SREV_9300_20_OR_LATER(ah)) 79 if (AR_SREV_9300_20_OR_LATER(ah))
72 rxclear_polarity = !ath_bt_config.bt_rxclear_polarity; 80 rxclear_polarity = !ath_bt_config.bt_rxclear_polarity;
73 81
82 if (AR_SREV_SOC(ah)) {
83 first_slot_time = 0x1d;
84 time_extend = 0xa;
85
86 btcoex_hw->bt_coex_mode3 =
87 SM(ath_bt_config.wl_active_time, AR_BT_WL_ACTIVE_TIME) |
88 SM(ath_bt_config.wl_qc_time, AR_BT_WL_QC_TIME);
89
90 btcoex_hw->bt_coex_mode2 =
91 AR_BT_PROTECT_BT_AFTER_WAKEUP |
92 AR_BT_PHY_ERR_BT_COLL_ENABLE;
93 }
94
74 btcoex_hw->bt_coex_mode = 95 btcoex_hw->bt_coex_mode =
75 (btcoex_hw->bt_coex_mode & AR_BT_QCU_THRESH) | 96 (btcoex_hw->bt_coex_mode & AR_BT_QCU_THRESH) |
76 SM(ath_bt_config.bt_time_extend, AR_BT_TIME_EXTEND) | 97 SM(time_extend, AR_BT_TIME_EXTEND) |
77 SM(ath_bt_config.bt_txstate_extend, AR_BT_TXSTATE_EXTEND) | 98 SM(ath_bt_config.bt_txstate_extend, AR_BT_TXSTATE_EXTEND) |
78 SM(ath_bt_config.bt_txframe_extend, AR_BT_TX_FRAME_EXTEND) | 99 SM(ath_bt_config.bt_txframe_extend, AR_BT_TX_FRAME_EXTEND) |
79 SM(ath_bt_config.bt_mode, AR_BT_MODE) | 100 SM(ath_bt_config.bt_mode, AR_BT_MODE) |
80 SM(ath_bt_config.bt_quiet_collision, AR_BT_QUIET) | 101 SM(ath_bt_config.bt_quiet_collision, AR_BT_QUIET) |
81 SM(rxclear_polarity, AR_BT_RX_CLEAR_POLARITY) | 102 SM(rxclear_polarity, AR_BT_RX_CLEAR_POLARITY) |
82 SM(ath_bt_config.bt_priority_time, AR_BT_PRIORITY_TIME) | 103 SM(ath_bt_config.bt_priority_time, AR_BT_PRIORITY_TIME) |
83 SM(ath_bt_config.bt_first_slot_time, AR_BT_FIRST_SLOT_TIME) | 104 SM(first_slot_time, AR_BT_FIRST_SLOT_TIME) |
84 SM(qnum, AR_BT_QCU_THRESH); 105 SM(qnum, AR_BT_QCU_THRESH);
85 106
86 btcoex_hw->bt_coex_mode2 = 107 btcoex_hw->bt_coex_mode2 |=
87 SM(ath_bt_config.bt_hold_rx_clear, AR_BT_HOLD_RX_CLEAR) | 108 SM(ath_bt_config.bt_hold_rx_clear, AR_BT_HOLD_RX_CLEAR) |
88 SM(ATH_BTCOEX_BMISS_THRESH, AR_BT_BCN_MISS_THRESH) | 109 SM(ATH_BTCOEX_BMISS_THRESH, AR_BT_BCN_MISS_THRESH) |
89 AR_BT_DISABLE_BT_ANT; 110 AR_BT_DISABLE_BT_ANT;
90} 111}
91EXPORT_SYMBOL(ath9k_hw_init_btcoex_hw); 112EXPORT_SYMBOL(ath9k_hw_init_btcoex_hw);
92 113
114static void ath9k_hw_btcoex_pin_init(struct ath_hw *ah, u8 wlanactive_gpio,
115 u8 btactive_gpio, u8 btpriority_gpio)
116{
117 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
118 struct ath9k_platform_data *pdata = ah->dev->platform_data;
119
120 if (btcoex_hw->scheme != ATH_BTCOEX_CFG_2WIRE &&
121 btcoex_hw->scheme != ATH_BTCOEX_CFG_3WIRE)
122 return;
123
124 /* bt priority GPIO will be ignored by 2 wire scheme */
125 if (pdata && (pdata->bt_active_pin || pdata->bt_priority_pin ||
126 pdata->wlan_active_pin)) {
127 btcoex_hw->btactive_gpio = pdata->bt_active_pin;
128 btcoex_hw->wlanactive_gpio = pdata->wlan_active_pin;
129 btcoex_hw->btpriority_gpio = pdata->bt_priority_pin;
130 } else {
131 btcoex_hw->btactive_gpio = btactive_gpio;
132 btcoex_hw->wlanactive_gpio = wlanactive_gpio;
133 btcoex_hw->btpriority_gpio = btpriority_gpio;
134 }
135}
136
93void ath9k_hw_btcoex_init_scheme(struct ath_hw *ah) 137void ath9k_hw_btcoex_init_scheme(struct ath_hw *ah)
94{ 138{
95 struct ath_common *common = ath9k_hw_common(ah); 139 struct ath_common *common = ath9k_hw_common(ah);
@@ -107,19 +151,19 @@ void ath9k_hw_btcoex_init_scheme(struct ath_hw *ah)
107 btcoex_hw->scheme = ATH_BTCOEX_CFG_MCI; 151 btcoex_hw->scheme = ATH_BTCOEX_CFG_MCI;
108 } else if (AR_SREV_9300_20_OR_LATER(ah)) { 152 } else if (AR_SREV_9300_20_OR_LATER(ah)) {
109 btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE; 153 btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE;
110 btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9300;
111 btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9300;
112 btcoex_hw->btpriority_gpio = ATH_BTPRIORITY_GPIO_9300;
113 } else if (AR_SREV_9280_20_OR_LATER(ah)) {
114 btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9280;
115 btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9280;
116 154
117 if (AR_SREV_9285(ah)) { 155 ath9k_hw_btcoex_pin_init(ah, ATH_WLANACTIVE_GPIO_9300,
156 ATH_BTACTIVE_GPIO_9300,
157 ATH_BTPRIORITY_GPIO_9300);
158 } else if (AR_SREV_9280_20_OR_LATER(ah)) {
159 if (AR_SREV_9285(ah))
118 btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE; 160 btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE;
119 btcoex_hw->btpriority_gpio = ATH_BTPRIORITY_GPIO_9285; 161 else
120 } else {
121 btcoex_hw->scheme = ATH_BTCOEX_CFG_2WIRE; 162 btcoex_hw->scheme = ATH_BTCOEX_CFG_2WIRE;
122 } 163
164 ath9k_hw_btcoex_pin_init(ah, ATH_WLANACTIVE_GPIO_9280,
165 ATH_BTACTIVE_GPIO_9280,
166 ATH_BTPRIORITY_GPIO_9285);
123 } 167 }
124} 168}
125EXPORT_SYMBOL(ath9k_hw_btcoex_init_scheme); 169EXPORT_SYMBOL(ath9k_hw_btcoex_init_scheme);
@@ -137,12 +181,14 @@ void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah)
137 AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB); 181 AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB);
138 182
139 /* Set input mux for bt_active to gpio pin */ 183 /* Set input mux for bt_active to gpio pin */
140 REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1, 184 if (!AR_SREV_SOC(ah))
141 AR_GPIO_INPUT_MUX1_BT_ACTIVE, 185 REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
142 btcoex_hw->btactive_gpio); 186 AR_GPIO_INPUT_MUX1_BT_ACTIVE,
187 btcoex_hw->btactive_gpio);
143 188
144 /* Configure the desired gpio port for input */ 189 /* Configure the desired gpio port for input */
145 ath9k_hw_cfg_gpio_input(ah, btcoex_hw->btactive_gpio); 190 ath9k_hw_gpio_request_in(ah, btcoex_hw->btactive_gpio,
191 "ath9k-btactive");
146} 192}
147EXPORT_SYMBOL(ath9k_hw_btcoex_init_2wire); 193EXPORT_SYMBOL(ath9k_hw_btcoex_init_2wire);
148 194
@@ -157,21 +203,33 @@ void ath9k_hw_btcoex_init_3wire(struct ath_hw *ah)
157 203
158 /* Set input mux for bt_prority_async and 204 /* Set input mux for bt_prority_async and
159 * bt_active_async to GPIO pins */ 205 * bt_active_async to GPIO pins */
160 REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1, 206 if (!AR_SREV_SOC(ah)) {
161 AR_GPIO_INPUT_MUX1_BT_ACTIVE, 207 REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
162 btcoex_hw->btactive_gpio); 208 AR_GPIO_INPUT_MUX1_BT_ACTIVE,
163 209 btcoex_hw->btactive_gpio);
164 REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1, 210 REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
165 AR_GPIO_INPUT_MUX1_BT_PRIORITY, 211 AR_GPIO_INPUT_MUX1_BT_PRIORITY,
166 btcoex_hw->btpriority_gpio); 212 btcoex_hw->btpriority_gpio);
213 }
167 214
168 /* Configure the desired GPIO ports for input */ 215 /* Configure the desired GPIO ports for input */
169 216 ath9k_hw_gpio_request_in(ah, btcoex_hw->btactive_gpio,
170 ath9k_hw_cfg_gpio_input(ah, btcoex_hw->btactive_gpio); 217 "ath9k-btactive");
171 ath9k_hw_cfg_gpio_input(ah, btcoex_hw->btpriority_gpio); 218 ath9k_hw_gpio_request_in(ah, btcoex_hw->btpriority_gpio,
219 "ath9k-btpriority");
172} 220}
173EXPORT_SYMBOL(ath9k_hw_btcoex_init_3wire); 221EXPORT_SYMBOL(ath9k_hw_btcoex_init_3wire);
174 222
223void ath9k_hw_btcoex_deinit(struct ath_hw *ah)
224{
225 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
226
227 ath9k_hw_gpio_free(ah, btcoex_hw->btactive_gpio);
228 ath9k_hw_gpio_free(ah, btcoex_hw->btpriority_gpio);
229 ath9k_hw_gpio_free(ah, btcoex_hw->wlanactive_gpio);
230}
231EXPORT_SYMBOL(ath9k_hw_btcoex_deinit);
232
175void ath9k_hw_btcoex_init_mci(struct ath_hw *ah) 233void ath9k_hw_btcoex_init_mci(struct ath_hw *ah)
176{ 234{
177 ah->btcoex_hw.mci.ready = false; 235 ah->btcoex_hw.mci.ready = false;
@@ -201,8 +259,9 @@ static void ath9k_hw_btcoex_enable_2wire(struct ath_hw *ah)
201 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; 259 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
202 260
203 /* Configure the desired GPIO port for TX_FRAME output */ 261 /* Configure the desired GPIO port for TX_FRAME output */
204 ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio, 262 ath9k_hw_gpio_request_out(ah, btcoex_hw->wlanactive_gpio,
205 AR_GPIO_OUTPUT_MUX_AS_TX_FRAME); 263 "ath9k-wlanactive",
264 AR_GPIO_OUTPUT_MUX_AS_TX_FRAME);
206} 265}
207 266
208/* 267/*
@@ -247,13 +306,13 @@ void ath9k_hw_btcoex_set_weight(struct ath_hw *ah,
247 txprio_shift[i-1]); 306 txprio_shift[i-1]);
248 } 307 }
249 } 308 }
309
250 /* Last WLAN weight has to be adjusted wrt tx priority */ 310 /* Last WLAN weight has to be adjusted wrt tx priority */
251 if (concur_tx) { 311 if (concur_tx) {
252 btcoex_hw->wlan_weight[i-1] &= ~(0xff << txprio_shift[i-1]); 312 btcoex_hw->wlan_weight[i-1] &= ~(0xff << txprio_shift[i-1]);
253 btcoex_hw->wlan_weight[i-1] |= (btcoex_hw->tx_prio[stomp_type] 313 btcoex_hw->wlan_weight[i-1] |= (btcoex_hw->tx_prio[stomp_type]
254 << txprio_shift[i-1]); 314 << txprio_shift[i-1]);
255 } 315 }
256
257} 316}
258EXPORT_SYMBOL(ath9k_hw_btcoex_set_weight); 317EXPORT_SYMBOL(ath9k_hw_btcoex_set_weight);
259 318
@@ -268,9 +327,14 @@ static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah)
268 * Program coex mode and weight registers to 327 * Program coex mode and weight registers to
269 * enable coex 3-wire 328 * enable coex 3-wire
270 */ 329 */
330 if (AR_SREV_SOC(ah))
331 REG_CLR_BIT(ah, AR_BT_COEX_MODE2, AR_BT_PHY_ERR_BT_COLL_ENABLE);
332
271 REG_WRITE(ah, AR_BT_COEX_MODE, btcoex->bt_coex_mode); 333 REG_WRITE(ah, AR_BT_COEX_MODE, btcoex->bt_coex_mode);
272 REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex->bt_coex_mode2); 334 REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex->bt_coex_mode2);
273 335
336 if (AR_SREV_SOC(ah))
337 REG_WRITE(ah, AR_BT_COEX_MODE3, btcoex->bt_coex_mode3);
274 338
275 if (AR_SREV_9300_20_OR_LATER(ah)) { 339 if (AR_SREV_9300_20_OR_LATER(ah)) {
276 REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, btcoex->wlan_weight[0]); 340 REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, btcoex->wlan_weight[0]);
@@ -281,8 +345,6 @@ static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah)
281 } else 345 } else
282 REG_WRITE(ah, AR_BT_COEX_WEIGHT, btcoex->bt_coex_weights); 346 REG_WRITE(ah, AR_BT_COEX_WEIGHT, btcoex->bt_coex_weights);
283 347
284
285
286 if (AR_SREV_9271(ah)) { 348 if (AR_SREV_9271(ah)) {
287 val = REG_READ(ah, 0x50040); 349 val = REG_READ(ah, 0x50040);
288 val &= 0xFFFFFEFF; 350 val &= 0xFFFFFEFF;
@@ -292,8 +354,9 @@ static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah)
292 REG_RMW_FIELD(ah, AR_QUIET1, AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1); 354 REG_RMW_FIELD(ah, AR_QUIET1, AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1);
293 REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0); 355 REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0);
294 356
295 ath9k_hw_cfg_output(ah, btcoex->wlanactive_gpio, 357 ath9k_hw_gpio_request_out(ah, btcoex->wlanactive_gpio,
296 AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL); 358 "ath9k-wlanactive",
359 AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL);
297} 360}
298 361
299static void ath9k_hw_btcoex_enable_mci(struct ath_hw *ah) 362static void ath9k_hw_btcoex_enable_mci(struct ath_hw *ah)
@@ -339,7 +402,8 @@ void ath9k_hw_btcoex_enable(struct ath_hw *ah)
339 break; 402 break;
340 } 403 }
341 404
342 if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_MCI) { 405 if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_MCI &&
406 !AR_SREV_SOC(ah)) {
343 REG_RMW(ah, AR_GPIO_PDPU, 407 REG_RMW(ah, AR_GPIO_PDPU,
344 (0x2 << (btcoex_hw->btactive_gpio * 2)), 408 (0x2 << (btcoex_hw->btactive_gpio * 2)),
345 (0x3 << (btcoex_hw->btactive_gpio * 2))); 409 (0x3 << (btcoex_hw->btactive_gpio * 2)));
@@ -364,8 +428,8 @@ void ath9k_hw_btcoex_disable(struct ath_hw *ah)
364 if (!AR_SREV_9300_20_OR_LATER(ah)) 428 if (!AR_SREV_9300_20_OR_LATER(ah))
365 ath9k_hw_set_gpio(ah, btcoex_hw->wlanactive_gpio, 0); 429 ath9k_hw_set_gpio(ah, btcoex_hw->wlanactive_gpio, 0);
366 430
367 ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio, 431 ath9k_hw_gpio_request_out(ah, btcoex_hw->wlanactive_gpio,
368 AR_GPIO_OUTPUT_MUX_AS_OUTPUT); 432 NULL, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
369 433
370 if (btcoex_hw->scheme == ATH_BTCOEX_CFG_3WIRE) { 434 if (btcoex_hw->scheme == ATH_BTCOEX_CFG_3WIRE) {
371 REG_WRITE(ah, AR_BT_COEX_MODE, AR_BT_QUIET | AR_BT_MODE); 435 REG_WRITE(ah, AR_BT_COEX_MODE, AR_BT_QUIET | AR_BT_MODE);
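The btcoex.c hunks above move from the old ath9k_hw_cfg_gpio_input() / ath9k_hw_cfg_output() calls to the request-style GPIO helpers that carry an owner label (NULL is accepted), paired with ath9k_hw_gpio_free() in the new ath9k_hw_btcoex_deinit(). A minimal sketch of the new calling convention; the GPIO number 4 and the "ath9k-example" label are made up for illustration:

        /* Sketch only, assuming the post-series helpers declared in ath9k's hw.h. */
        static void example_gpio_usage(struct ath_hw *ah)
        {
                /* Input pin: requested with an owner label instead of
                 * ath9k_hw_cfg_gpio_input(ah, 4). */
                ath9k_hw_gpio_request_in(ah, 4, "ath9k-example");
                ath9k_hw_gpio_free(ah, 4);

                /* Output pin: same label plus the mux type that used to be
                 * passed to ath9k_hw_cfg_output(). */
                ath9k_hw_gpio_request_out(ah, 4, "ath9k-example",
                                          AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
                ath9k_hw_gpio_free(ah, 4);
        }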
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h
index cd2f0a2373cb..1bdfa8465b92 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.h
+++ b/drivers/net/wireless/ath/ath9k/btcoex.h
@@ -115,6 +115,7 @@ struct ath_btcoex_hw {
115 u32 bt_coex_mode; /* Register setting for AR_BT_COEX_MODE */ 115 u32 bt_coex_mode; /* Register setting for AR_BT_COEX_MODE */
116 u32 bt_coex_weights; /* Register setting for AR_BT_COEX_WEIGHT */ 116 u32 bt_coex_weights; /* Register setting for AR_BT_COEX_WEIGHT */
117 u32 bt_coex_mode2; /* Register setting for AR_BT_COEX_MODE2 */ 117 u32 bt_coex_mode2; /* Register setting for AR_BT_COEX_MODE2 */
118 u32 bt_coex_mode3; /* Register setting for AR_BT_COEX_MODE3 */
118 u32 bt_weight[AR9300_NUM_BT_WEIGHTS]; 119 u32 bt_weight[AR9300_NUM_BT_WEIGHTS];
119 u32 wlan_weight[AR9300_NUM_WLAN_WEIGHTS]; 120 u32 wlan_weight[AR9300_NUM_WLAN_WEIGHTS];
120 u8 tx_prio[ATH_BTCOEX_STOMP_MAX]; 121 u8 tx_prio[ATH_BTCOEX_STOMP_MAX];
@@ -123,6 +124,7 @@ struct ath_btcoex_hw {
123void ath9k_hw_btcoex_init_scheme(struct ath_hw *ah); 124void ath9k_hw_btcoex_init_scheme(struct ath_hw *ah);
124void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah); 125void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah);
125void ath9k_hw_btcoex_init_3wire(struct ath_hw *ah); 126void ath9k_hw_btcoex_init_3wire(struct ath_hw *ah);
127void ath9k_hw_btcoex_deinit(struct ath_hw *ah);
126void ath9k_hw_btcoex_init_mci(struct ath_hw *ah); 128void ath9k_hw_btcoex_init_mci(struct ath_hw *ah);
127void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum); 129void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum);
128void ath9k_hw_btcoex_set_weight(struct ath_hw *ah, 130void ath9k_hw_btcoex_set_weight(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 37f6d66d1671..0f71146b781d 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -145,14 +145,14 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah,
145} 145}
146 146
147static bool ath9k_hw_get_nf_thresh(struct ath_hw *ah, 147static bool ath9k_hw_get_nf_thresh(struct ath_hw *ah,
148 enum ieee80211_band band, 148 enum nl80211_band band,
149 int16_t *nft) 149 int16_t *nft)
150{ 150{
151 switch (band) { 151 switch (band) {
152 case IEEE80211_BAND_5GHZ: 152 case NL80211_BAND_5GHZ:
153 *nft = (int8_t)ah->eep_ops->get_eeprom(ah, EEP_NFTHRESH_5); 153 *nft = (int8_t)ah->eep_ops->get_eeprom(ah, EEP_NFTHRESH_5);
154 break; 154 break;
155 case IEEE80211_BAND_2GHZ: 155 case NL80211_BAND_2GHZ:
156 *nft = (int8_t)ah->eep_ops->get_eeprom(ah, EEP_NFTHRESH_2); 156 *nft = (int8_t)ah->eep_ops->get_eeprom(ah, EEP_NFTHRESH_2);
157 break; 157 break;
158 default: 158 default:
diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c
index 319cb5f25f58..e56bafcf5864 100644
--- a/drivers/net/wireless/ath/ath9k/channel.c
+++ b/drivers/net/wireless/ath/ath9k/channel.c
@@ -107,9 +107,9 @@ void ath_chanctx_init(struct ath_softc *sc)
107 struct ieee80211_channel *chan; 107 struct ieee80211_channel *chan;
108 int i, j; 108 int i, j;
109 109
110 sband = &common->sbands[IEEE80211_BAND_2GHZ]; 110 sband = &common->sbands[NL80211_BAND_2GHZ];
111 if (!sband->n_channels) 111 if (!sband->n_channels)
112 sband = &common->sbands[IEEE80211_BAND_5GHZ]; 112 sband = &common->sbands[NL80211_BAND_5GHZ];
113 113
114 chan = &sband->channels[0]; 114 chan = &sband->channels[0];
115 for (i = 0; i < ATH9K_NUM_CHANCTX; i++) { 115 for (i = 0; i < ATH9K_NUM_CHANCTX; i++) {
@@ -1333,9 +1333,9 @@ void ath9k_offchannel_init(struct ath_softc *sc)
1333 struct ieee80211_channel *chan; 1333 struct ieee80211_channel *chan;
1334 int i; 1334 int i;
1335 1335
1336 sband = &common->sbands[IEEE80211_BAND_2GHZ]; 1336 sband = &common->sbands[NL80211_BAND_2GHZ];
1337 if (!sband->n_channels) 1337 if (!sband->n_channels)
1338 sband = &common->sbands[IEEE80211_BAND_5GHZ]; 1338 sband = &common->sbands[NL80211_BAND_5GHZ];
1339 1339
1340 chan = &sband->channels[0]; 1340 chan = &sband->channels[0];
1341 1341
diff --git a/drivers/net/wireless/ath/ath9k/common-init.c b/drivers/net/wireless/ath/ath9k/common-init.c
index a006c1499728..8b4f7fdabf58 100644
--- a/drivers/net/wireless/ath/ath9k/common-init.c
+++ b/drivers/net/wireless/ath/ath9k/common-init.c
@@ -19,14 +19,14 @@
19#include "common.h" 19#include "common.h"
20 20
21#define CHAN2G(_freq, _idx) { \ 21#define CHAN2G(_freq, _idx) { \
22 .band = IEEE80211_BAND_2GHZ, \ 22 .band = NL80211_BAND_2GHZ, \
23 .center_freq = (_freq), \ 23 .center_freq = (_freq), \
24 .hw_value = (_idx), \ 24 .hw_value = (_idx), \
25 .max_power = 20, \ 25 .max_power = 20, \
26} 26}
27 27
28#define CHAN5G(_freq, _idx) { \ 28#define CHAN5G(_freq, _idx) { \
29 .band = IEEE80211_BAND_5GHZ, \ 29 .band = NL80211_BAND_5GHZ, \
30 .center_freq = (_freq), \ 30 .center_freq = (_freq), \
31 .hw_value = (_idx), \ 31 .hw_value = (_idx), \
32 .max_power = 20, \ 32 .max_power = 20, \
@@ -139,12 +139,12 @@ int ath9k_cmn_init_channels_rates(struct ath_common *common)
139 139
140 memcpy(channels, ath9k_2ghz_chantable, 140 memcpy(channels, ath9k_2ghz_chantable,
141 sizeof(ath9k_2ghz_chantable)); 141 sizeof(ath9k_2ghz_chantable));
142 common->sbands[IEEE80211_BAND_2GHZ].channels = channels; 142 common->sbands[NL80211_BAND_2GHZ].channels = channels;
143 common->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ; 143 common->sbands[NL80211_BAND_2GHZ].band = NL80211_BAND_2GHZ;
144 common->sbands[IEEE80211_BAND_2GHZ].n_channels = 144 common->sbands[NL80211_BAND_2GHZ].n_channels =
145 ARRAY_SIZE(ath9k_2ghz_chantable); 145 ARRAY_SIZE(ath9k_2ghz_chantable);
146 common->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates; 146 common->sbands[NL80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
147 common->sbands[IEEE80211_BAND_2GHZ].n_bitrates = 147 common->sbands[NL80211_BAND_2GHZ].n_bitrates =
148 ARRAY_SIZE(ath9k_legacy_rates); 148 ARRAY_SIZE(ath9k_legacy_rates);
149 } 149 }
150 150
@@ -156,13 +156,13 @@ int ath9k_cmn_init_channels_rates(struct ath_common *common)
156 156
157 memcpy(channels, ath9k_5ghz_chantable, 157 memcpy(channels, ath9k_5ghz_chantable,
158 sizeof(ath9k_5ghz_chantable)); 158 sizeof(ath9k_5ghz_chantable));
159 common->sbands[IEEE80211_BAND_5GHZ].channels = channels; 159 common->sbands[NL80211_BAND_5GHZ].channels = channels;
160 common->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ; 160 common->sbands[NL80211_BAND_5GHZ].band = NL80211_BAND_5GHZ;
161 common->sbands[IEEE80211_BAND_5GHZ].n_channels = 161 common->sbands[NL80211_BAND_5GHZ].n_channels =
162 ARRAY_SIZE(ath9k_5ghz_chantable); 162 ARRAY_SIZE(ath9k_5ghz_chantable);
163 common->sbands[IEEE80211_BAND_5GHZ].bitrates = 163 common->sbands[NL80211_BAND_5GHZ].bitrates =
164 ath9k_legacy_rates + 4; 164 ath9k_legacy_rates + 4;
165 common->sbands[IEEE80211_BAND_5GHZ].n_bitrates = 165 common->sbands[NL80211_BAND_5GHZ].n_bitrates =
166 ARRAY_SIZE(ath9k_legacy_rates) - 4; 166 ARRAY_SIZE(ath9k_legacy_rates) - 4;
167 } 167 }
168 return 0; 168 return 0;
@@ -236,9 +236,9 @@ void ath9k_cmn_reload_chainmask(struct ath_hw *ah)
236 236
237 if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) 237 if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
238 ath9k_cmn_setup_ht_cap(ah, 238 ath9k_cmn_setup_ht_cap(ah,
239 &common->sbands[IEEE80211_BAND_2GHZ].ht_cap); 239 &common->sbands[NL80211_BAND_2GHZ].ht_cap);
240 if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) 240 if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
241 ath9k_cmn_setup_ht_cap(ah, 241 ath9k_cmn_setup_ht_cap(ah,
242 &common->sbands[IEEE80211_BAND_5GHZ].ht_cap); 242 &common->sbands[NL80211_BAND_5GHZ].ht_cap);
243} 243}
244EXPORT_SYMBOL(ath9k_cmn_reload_chainmask); 244EXPORT_SYMBOL(ath9k_cmn_reload_chainmask);
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index e8c699446470..b80e08b13b74 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -173,7 +173,7 @@ int ath9k_cmn_process_rate(struct ath_common *common,
173 struct ieee80211_rx_status *rxs) 173 struct ieee80211_rx_status *rxs)
174{ 174{
175 struct ieee80211_supported_band *sband; 175 struct ieee80211_supported_band *sband;
176 enum ieee80211_band band; 176 enum nl80211_band band;
177 unsigned int i = 0; 177 unsigned int i = 0;
178 struct ath_hw *ah = common->ah; 178 struct ath_hw *ah = common->ah;
179 179
@@ -305,7 +305,7 @@ static void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
305 ichan->channel = chan->center_freq; 305 ichan->channel = chan->center_freq;
306 ichan->chan = chan; 306 ichan->chan = chan;
307 307
308 if (chan->band == IEEE80211_BAND_5GHZ) 308 if (chan->band == NL80211_BAND_5GHZ)
309 flags |= CHANNEL_5GHZ; 309 flags |= CHANNEL_5GHZ;
310 310
311 switch (chandef->width) { 311 switch (chandef->width) {
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 6de64cface3c..c56e40ff35e5 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -916,10 +916,21 @@ static int open_file_regdump(struct inode *inode, struct file *file)
916 struct ath_softc *sc = inode->i_private; 916 struct ath_softc *sc = inode->i_private;
917 unsigned int len = 0; 917 unsigned int len = 0;
918 u8 *buf; 918 u8 *buf;
919 int i; 919 int i, j = 0;
920 unsigned long num_regs, regdump_len, max_reg_offset; 920 unsigned long num_regs, regdump_len, max_reg_offset;
921 const struct reg_hole {
922 u32 start;
923 u32 end;
924 } reg_hole_list[] = {
925 {0x0200, 0x07fc},
926 {0x0c00, 0x0ffc},
927 {0x2000, 0x3ffc},
928 {0x4100, 0x6ffc},
929 {0x705c, 0x7ffc},
930 {0x0000, 0x0000}
931 };
921 932
922 max_reg_offset = AR_SREV_9300_20_OR_LATER(sc->sc_ah) ? 0x16bd4 : 0xb500; 933 max_reg_offset = AR_SREV_9300_20_OR_LATER(sc->sc_ah) ? 0x8800 : 0xb500;
923 num_regs = max_reg_offset / 4 + 1; 934 num_regs = max_reg_offset / 4 + 1;
924 regdump_len = num_regs * REGDUMP_LINE_SIZE + 1; 935 regdump_len = num_regs * REGDUMP_LINE_SIZE + 1;
925 buf = vmalloc(regdump_len); 936 buf = vmalloc(regdump_len);
@@ -927,9 +938,16 @@ static int open_file_regdump(struct inode *inode, struct file *file)
927 return -ENOMEM; 938 return -ENOMEM;
928 939
929 ath9k_ps_wakeup(sc); 940 ath9k_ps_wakeup(sc);
930 for (i = 0; i < num_regs; i++) 941 for (i = 0; i < num_regs; i++) {
942 if (reg_hole_list[j].start == i << 2) {
943 i = reg_hole_list[j].end >> 2;
944 j++;
945 continue;
946 }
947
931 len += scnprintf(buf + len, regdump_len - len, 948 len += scnprintf(buf + len, regdump_len - len,
932 "0x%06x 0x%08x\n", i << 2, REG_READ(sc->sc_ah, i << 2)); 949 "0x%06x 0x%08x\n", i << 2, REG_READ(sc->sc_ah, i << 2));
950 }
933 ath9k_ps_restore(sc); 951 ath9k_ps_restore(sc);
934 952
935 file->private_data = buf; 953 file->private_data = buf;
diff --git a/drivers/net/wireless/ath/ath9k/debug_sta.c b/drivers/net/wireless/ath/ath9k/debug_sta.c
index c2ca57a2ed09..b66cfa91364f 100644
--- a/drivers/net/wireless/ath/ath9k/debug_sta.c
+++ b/drivers/net/wireless/ath/ath9k/debug_sta.c
@@ -139,7 +139,7 @@ void ath_debug_rate_stats(struct ath_softc *sc,
139 } 139 }
140 140
141 if (IS_OFDM_RATE(rs->rs_rate)) { 141 if (IS_OFDM_RATE(rs->rs_rate)) {
142 if (ah->curchan->chan->band == IEEE80211_BAND_2GHZ) 142 if (ah->curchan->chan->band == NL80211_BAND_2GHZ)
143 rstats->ofdm_stats[rxs->rate_idx - 4].ofdm_cnt++; 143 rstats->ofdm_stats[rxs->rate_idx - 4].ofdm_cnt++;
144 else 144 else
145 rstats->ofdm_stats[rxs->rate_idx].ofdm_cnt++; 145 rstats->ofdm_stats[rxs->rate_idx].ofdm_cnt++;
@@ -173,7 +173,7 @@ static ssize_t read_file_node_recv(struct file *file, char __user *user_buf,
173 struct ath_hw *ah = sc->sc_ah; 173 struct ath_hw *ah = sc->sc_ah;
174 struct ath_rx_rate_stats *rstats; 174 struct ath_rx_rate_stats *rstats;
175 struct ieee80211_sta *sta = an->sta; 175 struct ieee80211_sta *sta = an->sta;
176 enum ieee80211_band band; 176 enum nl80211_band band;
177 u32 len = 0, size = 4096; 177 u32 len = 0, size = 4096;
178 char *buf; 178 char *buf;
179 size_t retval; 179 size_t retval;
@@ -206,7 +206,7 @@ static ssize_t read_file_node_recv(struct file *file, char __user *user_buf,
206 len += scnprintf(buf + len, size - len, "\n"); 206 len += scnprintf(buf + len, size - len, "\n");
207 207
208legacy: 208legacy:
209 if (band == IEEE80211_BAND_2GHZ) { 209 if (band == NL80211_BAND_2GHZ) {
210 PRINT_CCK_RATE("CCK-1M/LP", 0, false); 210 PRINT_CCK_RATE("CCK-1M/LP", 0, false);
211 PRINT_CCK_RATE("CCK-2M/LP", 1, false); 211 PRINT_CCK_RATE("CCK-2M/LP", 1, false);
212 PRINT_CCK_RATE("CCK-5.5M/LP", 2, false); 212 PRINT_CCK_RATE("CCK-5.5M/LP", 2, false);
diff --git a/drivers/net/wireless/ath/ath9k/dynack.c b/drivers/net/wireless/ath/ath9k/dynack.c
index 22b3cc4c27cd..d2ff0fc0484c 100644
--- a/drivers/net/wireless/ath/ath9k/dynack.c
+++ b/drivers/net/wireless/ath/ath9k/dynack.c
@@ -212,7 +212,7 @@ void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb,
212 struct ieee80211_tx_rate *rates = info->status.rates; 212 struct ieee80211_tx_rate *rates = info->status.rates;
213 213
214 rate = &common->sbands[info->band].bitrates[rates[ridx].idx]; 214 rate = &common->sbands[info->band].bitrates[rates[ridx].idx];
215 if (info->band == IEEE80211_BAND_2GHZ && 215 if (info->band == NL80211_BAND_2GHZ &&
216 !(rate->flags & IEEE80211_RATE_ERP_G)) 216 !(rate->flags & IEEE80211_RATE_ERP_G))
217 phy = WLAN_RC_PHY_CCK; 217 phy = WLAN_RC_PHY_CCK;
218 else 218 else
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 284706798c71..490f74d9ddf0 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -21,6 +21,33 @@
21/********************************/ 21/********************************/
22 22
23#ifdef CONFIG_MAC80211_LEDS 23#ifdef CONFIG_MAC80211_LEDS
24
25void ath_fill_led_pin(struct ath_softc *sc)
26{
27 struct ath_hw *ah = sc->sc_ah;
28
29 /* Set default led pin if invalid */
30 if (ah->led_pin < 0) {
31 if (AR_SREV_9287(ah))
32 ah->led_pin = ATH_LED_PIN_9287;
33 else if (AR_SREV_9485(ah))
34 ah->led_pin = ATH_LED_PIN_9485;
35 else if (AR_SREV_9300(ah))
36 ah->led_pin = ATH_LED_PIN_9300;
37 else if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
38 ah->led_pin = ATH_LED_PIN_9462;
39 else
40 ah->led_pin = ATH_LED_PIN_DEF;
41 }
42
43 /* Configure gpio for output */
44 ath9k_hw_gpio_request_out(ah, ah->led_pin, "ath9k-led",
45 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
46
47 /* LED off, active low */
48 ath9k_hw_set_gpio(ah, ah->led_pin, ah->config.led_active_high ? 0 : 1);
49}
50
24static void ath_led_brightness(struct led_classdev *led_cdev, 51static void ath_led_brightness(struct led_classdev *led_cdev,
25 enum led_brightness brightness) 52 enum led_brightness brightness)
26{ 53{
@@ -40,6 +67,8 @@ void ath_deinit_leds(struct ath_softc *sc)
40 67
41 ath_led_brightness(&sc->led_cdev, LED_OFF); 68 ath_led_brightness(&sc->led_cdev, LED_OFF);
42 led_classdev_unregister(&sc->led_cdev); 69 led_classdev_unregister(&sc->led_cdev);
70
71 ath9k_hw_gpio_free(sc->sc_ah, sc->sc_ah->led_pin);
43} 72}
44 73
45void ath_init_leds(struct ath_softc *sc) 74void ath_init_leds(struct ath_softc *sc)
@@ -49,6 +78,8 @@ void ath_init_leds(struct ath_softc *sc)
49 if (AR_SREV_9100(sc->sc_ah)) 78 if (AR_SREV_9100(sc->sc_ah))
50 return; 79 return;
51 80
81 ath_fill_led_pin(sc);
82
52 if (!ath9k_led_blink) 83 if (!ath9k_led_blink)
53 sc->led_cdev.default_trigger = 84 sc->led_cdev.default_trigger =
54 ieee80211_get_radio_led_name(sc->hw); 85 ieee80211_get_radio_led_name(sc->hw);
@@ -64,37 +95,6 @@ void ath_init_leds(struct ath_softc *sc)
64 95
65 sc->led_registered = true; 96 sc->led_registered = true;
66} 97}
67
68void ath_fill_led_pin(struct ath_softc *sc)
69{
70 struct ath_hw *ah = sc->sc_ah;
71
72 if (AR_SREV_9100(ah))
73 return;
74
75 if (ah->led_pin >= 0) {
76 if (!((1 << ah->led_pin) & AR_GPIO_OE_OUT_MASK))
77 ath9k_hw_request_gpio(ah, ah->led_pin, "ath9k-led");
78 return;
79 }
80
81 if (AR_SREV_9287(ah))
82 ah->led_pin = ATH_LED_PIN_9287;
83 else if (AR_SREV_9485(sc->sc_ah))
84 ah->led_pin = ATH_LED_PIN_9485;
85 else if (AR_SREV_9300(sc->sc_ah))
86 ah->led_pin = ATH_LED_PIN_9300;
87 else if (AR_SREV_9462(sc->sc_ah) || AR_SREV_9565(sc->sc_ah))
88 ah->led_pin = ATH_LED_PIN_9462;
89 else
90 ah->led_pin = ATH_LED_PIN_DEF;
91
92 /* Configure gpio 1 for output */
93 ath9k_hw_cfg_output(ah, ah->led_pin, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
94
95 /* LED off, active low */
96 ath9k_hw_set_gpio(ah, ah->led_pin, (ah->config.led_active_high) ? 0 : 1);
97}
98#endif 98#endif
99 99
100/*******************/ 100/*******************/
@@ -402,6 +402,13 @@ void ath9k_deinit_btcoex(struct ath_softc *sc)
402 402
403 if (ath9k_hw_mci_is_enabled(ah)) 403 if (ath9k_hw_mci_is_enabled(ah))
404 ath_mci_cleanup(sc); 404 ath_mci_cleanup(sc);
405 else {
406 enum ath_btcoex_scheme scheme = ath9k_hw_get_btcoex_scheme(ah);
407
408 if (scheme == ATH_BTCOEX_CFG_2WIRE ||
409 scheme == ATH_BTCOEX_CFG_3WIRE)
410 ath9k_hw_btcoex_deinit(sc->sc_ah);
411 }
405} 412}
406 413
407int ath9k_init_btcoex(struct ath_softc *sc) 414int ath9k_init_btcoex(struct ath_softc *sc)
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 8cbf4904db7b..e1c338cb9cb5 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -527,7 +527,7 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
527 struct sk_buff *skb) 527 struct sk_buff *skb)
528{ 528{
529 struct sk_buff *nskb, *skb_pool[MAX_PKT_NUM_IN_TRANSFER]; 529 struct sk_buff *nskb, *skb_pool[MAX_PKT_NUM_IN_TRANSFER];
530 int index = 0, i = 0, len = skb->len; 530 int index = 0, i, len = skb->len;
531 int rx_remain_len, rx_pkt_len; 531 int rx_remain_len, rx_pkt_len;
532 u16 pool_index = 0; 532 u16 pool_index = 0;
533 u8 *ptr; 533 u8 *ptr;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
index 2aabcbdaba4e..ecb848b60725 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
@@ -253,17 +253,19 @@ void ath9k_deinit_leds(struct ath9k_htc_priv *priv)
253 ath9k_led_brightness(&priv->led_cdev, LED_OFF); 253 ath9k_led_brightness(&priv->led_cdev, LED_OFF);
254 led_classdev_unregister(&priv->led_cdev); 254 led_classdev_unregister(&priv->led_cdev);
255 cancel_work_sync(&priv->led_work); 255 cancel_work_sync(&priv->led_work);
256
257 ath9k_hw_gpio_free(priv->ah, priv->ah->led_pin);
256} 258}
257 259
258 260
259void ath9k_configure_leds(struct ath9k_htc_priv *priv) 261void ath9k_configure_leds(struct ath9k_htc_priv *priv)
260{ 262{
261 /* Configure gpio 1 for output */ 263 /* Configure gpio 1 for output */
262 ath9k_hw_cfg_output(priv->ah, priv->ah->led_pin, 264 ath9k_hw_gpio_request_out(priv->ah, priv->ah->led_pin,
263 AR_GPIO_OUTPUT_MUX_AS_OUTPUT); 265 "ath9k-led",
266 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
264 /* LED off, active low */ 267 /* LED off, active low */
265 ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 1); 268 ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 1);
266
267} 269}
268 270
269void ath9k_init_leds(struct ath9k_htc_priv *priv) 271void ath9k_init_leds(struct ath9k_htc_priv *priv)
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 8647ab77c019..c148c6c504f7 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -262,11 +262,11 @@ static void ath9k_multi_regread(void *hw_priv, u32 *addr,
262 __be32 tmpval[8]; 262 __be32 tmpval[8];
263 int i, ret; 263 int i, ret;
264 264
265 for (i = 0; i < count; i++) { 265 for (i = 0; i < count; i++) {
266 tmpaddr[i] = cpu_to_be32(addr[i]); 266 tmpaddr[i] = cpu_to_be32(addr[i]);
267 } 267 }
268 268
269 ret = ath9k_wmi_cmd(priv->wmi, WMI_REG_READ_CMDID, 269 ret = ath9k_wmi_cmd(priv->wmi, WMI_REG_READ_CMDID,
270 (u8 *)tmpaddr , sizeof(u32) * count, 270 (u8 *)tmpaddr , sizeof(u32) * count,
271 (u8 *)tmpval, sizeof(u32) * count, 271 (u8 *)tmpval, sizeof(u32) * count,
272 100); 272 100);
@@ -275,9 +275,9 @@ static void ath9k_multi_regread(void *hw_priv, u32 *addr,
275 "Multiple REGISTER READ FAILED (count: %d)\n", count); 275 "Multiple REGISTER READ FAILED (count: %d)\n", count);
276 } 276 }
277 277
278 for (i = 0; i < count; i++) { 278 for (i = 0; i < count; i++) {
279 val[i] = be32_to_cpu(tmpval[i]); 279 val[i] = be32_to_cpu(tmpval[i]);
280 } 280 }
281} 281}
282 282
283static void ath9k_regwrite_multi(struct ath_common *common) 283static void ath9k_regwrite_multi(struct ath_common *common)
@@ -765,11 +765,11 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
765 sizeof(struct htc_frame_hdr) + 4; 765 sizeof(struct htc_frame_hdr) + 4;
766 766
767 if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) 767 if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
768 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = 768 hw->wiphy->bands[NL80211_BAND_2GHZ] =
769 &common->sbands[IEEE80211_BAND_2GHZ]; 769 &common->sbands[NL80211_BAND_2GHZ];
770 if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) 770 if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
771 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = 771 hw->wiphy->bands[NL80211_BAND_5GHZ] =
772 &common->sbands[IEEE80211_BAND_5GHZ]; 772 &common->sbands[NL80211_BAND_5GHZ];
773 773
774 ath9k_cmn_reload_chainmask(ah); 774 ath9k_cmn_reload_chainmask(ah);
775 775
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 639294a9e34d..8a8d7853da15 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1770,8 +1770,8 @@ static int ath9k_htc_set_bitrate_mask(struct ieee80211_hw *hw,
1770 memset(&tmask, 0, sizeof(struct ath9k_htc_target_rate_mask)); 1770 memset(&tmask, 0, sizeof(struct ath9k_htc_target_rate_mask));
1771 1771
1772 tmask.vif_index = avp->index; 1772 tmask.vif_index = avp->index;
1773 tmask.band = IEEE80211_BAND_2GHZ; 1773 tmask.band = NL80211_BAND_2GHZ;
1774 tmask.mask = cpu_to_be32(mask->control[IEEE80211_BAND_2GHZ].legacy); 1774 tmask.mask = cpu_to_be32(mask->control[NL80211_BAND_2GHZ].legacy);
1775 1775
1776 WMI_CMD_BUF(WMI_BITRATE_MASK_CMDID, &tmask); 1776 WMI_CMD_BUF(WMI_BITRATE_MASK_CMDID, &tmask);
1777 if (ret) { 1777 if (ret) {
@@ -1781,8 +1781,8 @@ static int ath9k_htc_set_bitrate_mask(struct ieee80211_hw *hw,
1781 goto out; 1781 goto out;
1782 } 1782 }
1783 1783
1784 tmask.band = IEEE80211_BAND_5GHZ; 1784 tmask.band = NL80211_BAND_5GHZ;
1785 tmask.mask = cpu_to_be32(mask->control[IEEE80211_BAND_5GHZ].legacy); 1785 tmask.mask = cpu_to_be32(mask->control[NL80211_BAND_5GHZ].legacy);
1786 1786
1787 WMI_CMD_BUF(WMI_BITRATE_MASK_CMDID, &tmask); 1787 WMI_CMD_BUF(WMI_BITRATE_MASK_CMDID, &tmask);
1788 if (ret) { 1788 if (ret) {
@@ -1793,8 +1793,8 @@ static int ath9k_htc_set_bitrate_mask(struct ieee80211_hw *hw,
1793 } 1793 }
1794 1794
1795 ath_dbg(common, CONFIG, "Set bitrate masks: 0x%x, 0x%x\n", 1795 ath_dbg(common, CONFIG, "Set bitrate masks: 0x%x, 0x%x\n",
1796 mask->control[IEEE80211_BAND_2GHZ].legacy, 1796 mask->control[NL80211_BAND_2GHZ].legacy,
1797 mask->control[IEEE80211_BAND_5GHZ].legacy); 1797 mask->control[NL80211_BAND_5GHZ].legacy);
1798out: 1798out:
1799 return ret; 1799 return ret;
1800} 1800}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index cc9648f844ae..f333ef1e3e7b 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -494,7 +494,7 @@ static void ath9k_htc_tx_process(struct ath9k_htc_priv *priv,
494 if (txs->ts_flags & ATH9K_HTC_TXSTAT_SGI) 494 if (txs->ts_flags & ATH9K_HTC_TXSTAT_SGI)
495 rate->flags |= IEEE80211_TX_RC_SHORT_GI; 495 rate->flags |= IEEE80211_TX_RC_SHORT_GI;
496 } else { 496 } else {
497 if (cur_conf->chandef.chan->band == IEEE80211_BAND_5GHZ) 497 if (cur_conf->chandef.chan->band == NL80211_BAND_5GHZ)
498 rate->idx += 4; /* No CCK rates */ 498 rate->idx += 4; /* No CCK rates */
499 } 499 }
500 500
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index e7a31016f370..42009065e234 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1582,8 +1582,10 @@ static void ath9k_hw_apply_gpio_override(struct ath_hw *ah)
1582 if (!(gpio_mask & 1)) 1582 if (!(gpio_mask & 1))
1583 continue; 1583 continue;
1584 1584
1585 ath9k_hw_cfg_output(ah, i, AR_GPIO_OUTPUT_MUX_AS_OUTPUT); 1585 ath9k_hw_gpio_request_out(ah, i, NULL,
1586 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
1586 ath9k_hw_set_gpio(ah, i, !!(ah->gpio_val & BIT(i))); 1587 ath9k_hw_set_gpio(ah, i, !!(ah->gpio_val & BIT(i)));
1588 ath9k_hw_gpio_free(ah, i);
1587 } 1589 }
1588} 1590}
1589 1591
@@ -1958,7 +1960,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1958 ath9k_hw_init_qos(ah); 1960 ath9k_hw_init_qos(ah);
1959 1961
1960 if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT) 1962 if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1961 ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio); 1963 ath9k_hw_gpio_request_in(ah, ah->rfkill_gpio, "ath9k-rfkill");
1962 1964
1963 ath9k_hw_init_global_settings(ah); 1965 ath9k_hw_init_global_settings(ah);
1964 1966
@@ -2385,6 +2387,61 @@ static bool ath9k_hw_dfs_tested(struct ath_hw *ah)
2385 } 2387 }
2386} 2388}
2387 2389
2390static void ath9k_gpio_cap_init(struct ath_hw *ah)
2391{
2392 struct ath9k_hw_capabilities *pCap = &ah->caps;
2393
2394 if (AR_SREV_9271(ah)) {
2395 pCap->num_gpio_pins = AR9271_NUM_GPIO;
2396 pCap->gpio_mask = AR9271_GPIO_MASK;
2397 } else if (AR_DEVID_7010(ah)) {
2398 pCap->num_gpio_pins = AR7010_NUM_GPIO;
2399 pCap->gpio_mask = AR7010_GPIO_MASK;
2400 } else if (AR_SREV_9287(ah)) {
2401 pCap->num_gpio_pins = AR9287_NUM_GPIO;
2402 pCap->gpio_mask = AR9287_GPIO_MASK;
2403 } else if (AR_SREV_9285(ah)) {
2404 pCap->num_gpio_pins = AR9285_NUM_GPIO;
2405 pCap->gpio_mask = AR9285_GPIO_MASK;
2406 } else if (AR_SREV_9280(ah)) {
2407 pCap->num_gpio_pins = AR9280_NUM_GPIO;
2408 pCap->gpio_mask = AR9280_GPIO_MASK;
2409 } else if (AR_SREV_9300(ah)) {
2410 pCap->num_gpio_pins = AR9300_NUM_GPIO;
2411 pCap->gpio_mask = AR9300_GPIO_MASK;
2412 } else if (AR_SREV_9330(ah)) {
2413 pCap->num_gpio_pins = AR9330_NUM_GPIO;
2414 pCap->gpio_mask = AR9330_GPIO_MASK;
2415 } else if (AR_SREV_9340(ah)) {
2416 pCap->num_gpio_pins = AR9340_NUM_GPIO;
2417 pCap->gpio_mask = AR9340_GPIO_MASK;
2418 } else if (AR_SREV_9462(ah)) {
2419 pCap->num_gpio_pins = AR9462_NUM_GPIO;
2420 pCap->gpio_mask = AR9462_GPIO_MASK;
2421 } else if (AR_SREV_9485(ah)) {
2422 pCap->num_gpio_pins = AR9485_NUM_GPIO;
2423 pCap->gpio_mask = AR9485_GPIO_MASK;
2424 } else if (AR_SREV_9531(ah)) {
2425 pCap->num_gpio_pins = AR9531_NUM_GPIO;
2426 pCap->gpio_mask = AR9531_GPIO_MASK;
2427 } else if (AR_SREV_9550(ah)) {
2428 pCap->num_gpio_pins = AR9550_NUM_GPIO;
2429 pCap->gpio_mask = AR9550_GPIO_MASK;
2430 } else if (AR_SREV_9561(ah)) {
2431 pCap->num_gpio_pins = AR9561_NUM_GPIO;
2432 pCap->gpio_mask = AR9561_GPIO_MASK;
2433 } else if (AR_SREV_9565(ah)) {
2434 pCap->num_gpio_pins = AR9565_NUM_GPIO;
2435 pCap->gpio_mask = AR9565_GPIO_MASK;
2436 } else if (AR_SREV_9580(ah)) {
2437 pCap->num_gpio_pins = AR9580_NUM_GPIO;
2438 pCap->gpio_mask = AR9580_GPIO_MASK;
2439 } else {
2440 pCap->num_gpio_pins = AR_NUM_GPIO;
2441 pCap->gpio_mask = AR_GPIO_MASK;
2442 }
2443}
2444
2388int ath9k_hw_fill_cap_info(struct ath_hw *ah) 2445int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2389{ 2446{
2390 struct ath9k_hw_capabilities *pCap = &ah->caps; 2447 struct ath9k_hw_capabilities *pCap = &ah->caps;
@@ -2478,20 +2535,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2478 else 2535 else
2479 pCap->hw_caps &= ~ATH9K_HW_CAP_HT; 2536 pCap->hw_caps &= ~ATH9K_HW_CAP_HT;
2480 2537
2481 if (AR_SREV_9271(ah)) 2538 ath9k_gpio_cap_init(ah);
2482 pCap->num_gpio_pins = AR9271_NUM_GPIO;
2483 else if (AR_DEVID_7010(ah))
2484 pCap->num_gpio_pins = AR7010_NUM_GPIO;
2485 else if (AR_SREV_9300_20_OR_LATER(ah))
2486 pCap->num_gpio_pins = AR9300_NUM_GPIO;
2487 else if (AR_SREV_9287_11_OR_LATER(ah))
2488 pCap->num_gpio_pins = AR9287_NUM_GPIO;
2489 else if (AR_SREV_9285_12_OR_LATER(ah))
2490 pCap->num_gpio_pins = AR9285_NUM_GPIO;
2491 else if (AR_SREV_9280_20_OR_LATER(ah))
2492 pCap->num_gpio_pins = AR928X_NUM_GPIO;
2493 else
2494 pCap->num_gpio_pins = AR_NUM_GPIO;
2495 2539
2496 if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah)) 2540 if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah))
2497 pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX; 2541 pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX;
@@ -2612,8 +2656,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2612/* GPIO / RFKILL / Antennae */ 2656/* GPIO / RFKILL / Antennae */
2613/****************************/ 2657/****************************/
2614 2658
2615static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah, 2659static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah, u32 gpio, u32 type)
2616 u32 gpio, u32 type)
2617{ 2660{
2618 int addr; 2661 int addr;
2619 u32 gpio_shift, tmp; 2662 u32 gpio_shift, tmp;
@@ -2627,8 +2670,8 @@ static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah,
2627 2670
2628 gpio_shift = (gpio % 6) * 5; 2671 gpio_shift = (gpio % 6) * 5;
2629 2672
2630 if (AR_SREV_9280_20_OR_LATER(ah) 2673 if (AR_SREV_9280_20_OR_LATER(ah) ||
2631 || (addr != AR_GPIO_OUTPUT_MUX1)) { 2674 (addr != AR_GPIO_OUTPUT_MUX1)) {
2632 REG_RMW(ah, addr, (type << gpio_shift), 2675 REG_RMW(ah, addr, (type << gpio_shift),
2633 (0x1f << gpio_shift)); 2676 (0x1f << gpio_shift));
2634 } else { 2677 } else {
@@ -2640,106 +2683,144 @@ static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah,
2640 } 2683 }
2641} 2684}
2642 2685
2643void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio) 2686/* BSP should set the corresponding MUX register correctly.
2687 */
2688static void ath9k_hw_gpio_cfg_soc(struct ath_hw *ah, u32 gpio, bool out,
2689 const char *label)
2644{ 2690{
2645 u32 gpio_shift; 2691 if (ah->caps.gpio_requested & BIT(gpio))
2692 return;
2646 2693
2647 BUG_ON(gpio >= ah->caps.num_gpio_pins); 2694 /* may be requested by BSP, free anyway */
2695 gpio_free(gpio);
2648 2696
2649 if (AR_DEVID_7010(ah)) { 2697 if (gpio_request_one(gpio, out ? GPIOF_OUT_INIT_LOW : GPIOF_IN, label))
2650 gpio_shift = gpio;
2651 REG_RMW(ah, AR7010_GPIO_OE,
2652 (AR7010_GPIO_OE_AS_INPUT << gpio_shift),
2653 (AR7010_GPIO_OE_MASK << gpio_shift));
2654 return; 2698 return;
2655 }
2656 2699
2657 gpio_shift = gpio << 1; 2700 ah->caps.gpio_requested |= BIT(gpio);
2658 REG_RMW(ah,
2659 AR_GPIO_OE_OUT,
2660 (AR_GPIO_OE_OUT_DRV_NO << gpio_shift),
2661 (AR_GPIO_OE_OUT_DRV << gpio_shift));
2662} 2701}
2663EXPORT_SYMBOL(ath9k_hw_cfg_gpio_input);
2664 2702
2665u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio) 2703static void ath9k_hw_gpio_cfg_wmac(struct ath_hw *ah, u32 gpio, bool out,
2704 u32 ah_signal_type)
2666{ 2705{
2667#define MS_REG_READ(x, y) \ 2706 u32 gpio_set, gpio_shift = gpio;
2668 (MS(REG_READ(ah, AR_GPIO_IN_OUT), x##_GPIO_IN_VAL) & (AR_GPIO_BIT(y)))
2669
2670 if (gpio >= ah->caps.num_gpio_pins)
2671 return 0xffffffff;
2672 2707
2673 if (AR_DEVID_7010(ah)) { 2708 if (AR_DEVID_7010(ah)) {
2674 u32 val; 2709 gpio_set = out ?
2675 val = REG_READ(ah, AR7010_GPIO_IN); 2710 AR7010_GPIO_OE_AS_OUTPUT : AR7010_GPIO_OE_AS_INPUT;
2676 return (MS(val, AR7010_GPIO_IN_VAL) & AR_GPIO_BIT(gpio)) == 0; 2711 REG_RMW(ah, AR7010_GPIO_OE, gpio_set << gpio_shift,
2677 } else if (AR_SREV_9300_20_OR_LATER(ah)) 2712 AR7010_GPIO_OE_MASK << gpio_shift);
2678 return (MS(REG_READ(ah, AR_GPIO_IN), AR9300_GPIO_IN_VAL) & 2713 } else if (AR_SREV_SOC(ah)) {
2679 AR_GPIO_BIT(gpio)) != 0; 2714 gpio_set = out ? 1 : 0;
2680 else if (AR_SREV_9271(ah)) 2715 REG_RMW(ah, AR_GPIO_OE_OUT, gpio_set << gpio_shift,
2681 return MS_REG_READ(AR9271, gpio) != 0; 2716 gpio_set << gpio_shift);
2682 else if (AR_SREV_9287_11_OR_LATER(ah)) 2717 } else {
2683 return MS_REG_READ(AR9287, gpio) != 0; 2718 gpio_shift = gpio << 1;
2684 else if (AR_SREV_9285_12_OR_LATER(ah)) 2719 gpio_set = out ?
2685 return MS_REG_READ(AR9285, gpio) != 0; 2720 AR_GPIO_OE_OUT_DRV_ALL : AR_GPIO_OE_OUT_DRV_NO;
2686 else if (AR_SREV_9280_20_OR_LATER(ah)) 2721 REG_RMW(ah, AR_GPIO_OE_OUT, gpio_set << gpio_shift,
2687 return MS_REG_READ(AR928X, gpio) != 0; 2722 AR_GPIO_OE_OUT_DRV << gpio_shift);
2688 else 2723
2689 return MS_REG_READ(AR, gpio) != 0; 2724 if (out)
2725 ath9k_hw_gpio_cfg_output_mux(ah, gpio, ah_signal_type);
2726 }
2690} 2727}
2691EXPORT_SYMBOL(ath9k_hw_gpio_get);
2692 2728
2693void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio, 2729static void ath9k_hw_gpio_request(struct ath_hw *ah, u32 gpio, bool out,
2694 u32 ah_signal_type) 2730 const char *label, u32 ah_signal_type)
2695{ 2731{
2696 u32 gpio_shift; 2732 WARN_ON(gpio >= ah->caps.num_gpio_pins);
2697 2733
2698 if (AR_DEVID_7010(ah)) { 2734 if (BIT(gpio) & ah->caps.gpio_mask)
2699 gpio_shift = gpio; 2735 ath9k_hw_gpio_cfg_wmac(ah, gpio, out, ah_signal_type);
2700 REG_RMW(ah, AR7010_GPIO_OE, 2736 else if (AR_SREV_SOC(ah))
2701 (AR7010_GPIO_OE_AS_OUTPUT << gpio_shift), 2737 ath9k_hw_gpio_cfg_soc(ah, gpio, out, label);
2702 (AR7010_GPIO_OE_MASK << gpio_shift)); 2738 else
2703 return; 2739 WARN_ON(1);
2704 } 2740}
2705 2741
2706 ath9k_hw_gpio_cfg_output_mux(ah, gpio, ah_signal_type); 2742void ath9k_hw_gpio_request_in(struct ath_hw *ah, u32 gpio, const char *label)
2707 gpio_shift = 2 * gpio; 2743{
2708 REG_RMW(ah, 2744 ath9k_hw_gpio_request(ah, gpio, false, label, 0);
2709 AR_GPIO_OE_OUT,
2710 (AR_GPIO_OE_OUT_DRV_ALL << gpio_shift),
2711 (AR_GPIO_OE_OUT_DRV << gpio_shift));
2712} 2745}
2713EXPORT_SYMBOL(ath9k_hw_cfg_output); 2746EXPORT_SYMBOL(ath9k_hw_gpio_request_in);
2714 2747
2715void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val) 2748void ath9k_hw_gpio_request_out(struct ath_hw *ah, u32 gpio, const char *label,
2749 u32 ah_signal_type)
2716{ 2750{
2717 if (AR_DEVID_7010(ah)) { 2751 ath9k_hw_gpio_request(ah, gpio, true, label, ah_signal_type);
2718 val = val ? 0 : 1; 2752}
2719 REG_RMW(ah, AR7010_GPIO_OUT, ((val&1) << gpio), 2753EXPORT_SYMBOL(ath9k_hw_gpio_request_out);
2720 AR_GPIO_BIT(gpio)); 2754
2755void ath9k_hw_gpio_free(struct ath_hw *ah, u32 gpio)
2756{
2757 if (!AR_SREV_SOC(ah))
2721 return; 2758 return;
2759
2760 WARN_ON(gpio >= ah->caps.num_gpio_pins);
2761
2762 if (ah->caps.gpio_requested & BIT(gpio)) {
2763 gpio_free(gpio);
2764 ah->caps.gpio_requested &= ~BIT(gpio);
2722 } 2765 }
2766}
2767EXPORT_SYMBOL(ath9k_hw_gpio_free);
2723 2768
2724 if (AR_SREV_9271(ah)) 2769u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio)
2725 val = ~val; 2770{
2771 u32 val = 0xffffffff;
2726 2772
2727 if ((1 << gpio) & AR_GPIO_OE_OUT_MASK) 2773#define MS_REG_READ(x, y) \
2728 REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio), 2774 (MS(REG_READ(ah, AR_GPIO_IN_OUT), x##_GPIO_IN_VAL) & BIT(y))
2729 AR_GPIO_BIT(gpio)); 2775
2730 else 2776 WARN_ON(gpio >= ah->caps.num_gpio_pins);
2731 gpio_set_value(gpio, val & 1); 2777
2778 if (BIT(gpio) & ah->caps.gpio_mask) {
2779 if (AR_SREV_9271(ah))
2780 val = MS_REG_READ(AR9271, gpio);
2781 else if (AR_SREV_9287(ah))
2782 val = MS_REG_READ(AR9287, gpio);
2783 else if (AR_SREV_9285(ah))
2784 val = MS_REG_READ(AR9285, gpio);
2785 else if (AR_SREV_9280(ah))
2786 val = MS_REG_READ(AR928X, gpio);
2787 else if (AR_DEVID_7010(ah))
2788 val = REG_READ(ah, AR7010_GPIO_IN) & BIT(gpio);
2789 else if (AR_SREV_9300_20_OR_LATER(ah))
2790 val = REG_READ(ah, AR_GPIO_IN) & BIT(gpio);
2791 else
2792 val = MS_REG_READ(AR, gpio);
2793 } else if (BIT(gpio) & ah->caps.gpio_requested) {
2794 val = gpio_get_value(gpio) & BIT(gpio);
2795 } else {
2796 WARN_ON(1);
2797 }
2798
2799 return val;
2732} 2800}
2733EXPORT_SYMBOL(ath9k_hw_set_gpio); 2801EXPORT_SYMBOL(ath9k_hw_gpio_get);
2734 2802
2735void ath9k_hw_request_gpio(struct ath_hw *ah, u32 gpio, const char *label) 2803void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val)
2736{ 2804{
2737 if (gpio >= ah->caps.num_gpio_pins) 2805 WARN_ON(gpio >= ah->caps.num_gpio_pins);
2738 return; 2806
2807 if (AR_DEVID_7010(ah) || AR_SREV_9271(ah))
2808 val = !val;
2809 else
2810 val = !!val;
2739 2811
2740 gpio_request_one(gpio, GPIOF_DIR_OUT | GPIOF_INIT_LOW, label); 2812 if (BIT(gpio) & ah->caps.gpio_mask) {
2813 u32 out_addr = AR_DEVID_7010(ah) ?
2814 AR7010_GPIO_OUT : AR_GPIO_IN_OUT;
2815
2816 REG_RMW(ah, out_addr, val << gpio, BIT(gpio));
2817 } else if (BIT(gpio) & ah->caps.gpio_requested) {
2818 gpio_set_value(gpio, val);
2819 } else {
2820 WARN_ON(1);
2821 }
2741} 2822}
2742EXPORT_SYMBOL(ath9k_hw_request_gpio); 2823EXPORT_SYMBOL(ath9k_hw_set_gpio);
2743 2824
2744void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna) 2825void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna)
2745{ 2826{
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 831a54415a25..9cbca1229bac 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -160,7 +160,6 @@
160#define AR_GPIO_OUTPUT_MUX_AS_RUCKUS_DATA 0x1e 160#define AR_GPIO_OUTPUT_MUX_AS_RUCKUS_DATA 0x1e
161 161
162#define AR_GPIOD_MASK 0x00001FFF 162#define AR_GPIOD_MASK 0x00001FFF
163#define AR_GPIO_BIT(_gpio) (1 << (_gpio))
164 163
165#define BASE_ACTIVATE_DELAY 100 164#define BASE_ACTIVATE_DELAY 100
166#define RTC_PLL_SETTLE_DELAY (AR_SREV_9340(ah) ? 1000 : 100) 165#define RTC_PLL_SETTLE_DELAY (AR_SREV_9340(ah) ? 1000 : 100)
@@ -301,6 +300,8 @@ struct ath9k_hw_capabilities {
301 u8 max_txchains; 300 u8 max_txchains;
302 u8 max_rxchains; 301 u8 max_rxchains;
303 u8 num_gpio_pins; 302 u8 num_gpio_pins;
303 u32 gpio_mask;
304 u32 gpio_requested;
304 u8 rx_hp_qdepth; 305 u8 rx_hp_qdepth;
305 u8 rx_lp_qdepth; 306 u8 rx_lp_qdepth;
306 u8 rx_status_len; 307 u8 rx_status_len;
@@ -1019,12 +1020,12 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah);
1019u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan); 1020u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan);
1020 1021
1021/* GPIO / RFKILL / Antennae */ 1022/* GPIO / RFKILL / Antennae */
1022void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio); 1023void ath9k_hw_gpio_request_in(struct ath_hw *ah, u32 gpio, const char *label);
1024void ath9k_hw_gpio_request_out(struct ath_hw *ah, u32 gpio, const char *label,
1025 u32 ah_signal_type);
1026void ath9k_hw_gpio_free(struct ath_hw *ah, u32 gpio);
1023u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio); 1027u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio);
1024void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio,
1025 u32 ah_signal_type);
1026void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val); 1028void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val);
1027void ath9k_hw_request_gpio(struct ath_hw *ah, u32 gpio, const char *label);
1028void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna); 1029void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna);
1029 1030
1030/* General Operation */ 1031/* General Operation */
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 1c226d63bb03..4bf1e244b49b 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -660,7 +660,6 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
660 660
661 ath9k_cmn_init_crypto(sc->sc_ah); 661 ath9k_cmn_init_crypto(sc->sc_ah);
662 ath9k_init_misc(sc); 662 ath9k_init_misc(sc);
663 ath_fill_led_pin(sc);
664 ath_chanctx_init(sc); 663 ath_chanctx_init(sc);
665 ath9k_offchannel_init(sc); 664 ath9k_offchannel_init(sc);
666 665
@@ -706,9 +705,9 @@ static void ath9k_init_txpower_limits(struct ath_softc *sc)
706 struct ath9k_channel *curchan = ah->curchan; 705 struct ath9k_channel *curchan = ah->curchan;
707 706
708 if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) 707 if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
709 ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ); 708 ath9k_init_band_txpower(sc, NL80211_BAND_2GHZ);
710 if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) 709 if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
711 ath9k_init_band_txpower(sc, IEEE80211_BAND_5GHZ); 710 ath9k_init_band_txpower(sc, NL80211_BAND_5GHZ);
712 711
713 ah->curchan = curchan; 712 ah->curchan = curchan;
714} 713}
@@ -880,11 +879,11 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
880 sc->ant_tx = hw->wiphy->available_antennas_tx; 879 sc->ant_tx = hw->wiphy->available_antennas_tx;
881 880
882 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) 881 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
883 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = 882 hw->wiphy->bands[NL80211_BAND_2GHZ] =
884 &common->sbands[IEEE80211_BAND_2GHZ]; 883 &common->sbands[NL80211_BAND_2GHZ];
885 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) 884 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
886 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = 885 hw->wiphy->bands[NL80211_BAND_5GHZ] =
887 &common->sbands[IEEE80211_BAND_5GHZ]; 886 &common->sbands[NL80211_BAND_5GHZ];
888 887
889#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT 888#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
890 ath9k_set_mcc_capab(sc, hw); 889 ath9k_set_mcc_capab(sc, hw);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 3aed43a63f94..8b6398850657 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -718,12 +718,9 @@ static int ath9k_start(struct ieee80211_hw *hw)
718 if (!ath_complete_reset(sc, false)) 718 if (!ath_complete_reset(sc, false))
719 ah->reset_power_on = false; 719 ah->reset_power_on = false;
720 720
721 if (ah->led_pin >= 0) { 721 if (ah->led_pin >= 0)
722 ath9k_hw_cfg_output(ah, ah->led_pin,
723 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
724 ath9k_hw_set_gpio(ah, ah->led_pin, 722 ath9k_hw_set_gpio(ah, ah->led_pin,
725 (ah->config.led_active_high) ? 1 : 0); 723 (ah->config.led_active_high) ? 1 : 0);
726 }
727 724
728 /* 725 /*
729 * Reset key cache to sane defaults (all entries cleared) instead of 726 * Reset key cache to sane defaults (all entries cleared) instead of
@@ -867,11 +864,9 @@ static void ath9k_stop(struct ieee80211_hw *hw)
867 864
868 spin_lock_bh(&sc->sc_pcu_lock); 865 spin_lock_bh(&sc->sc_pcu_lock);
869 866
870 if (ah->led_pin >= 0) { 867 if (ah->led_pin >= 0)
871 ath9k_hw_set_gpio(ah, ah->led_pin, 868 ath9k_hw_set_gpio(ah, ah->led_pin,
872 (ah->config.led_active_high) ? 0 : 1); 869 (ah->config.led_active_high) ? 0 : 1);
873 ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
874 }
875 870
876 ath_prepare_reset(sc); 871 ath_prepare_reset(sc);
877 872
@@ -1938,14 +1933,14 @@ static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
1938 if (idx == 0) 1933 if (idx == 0)
1939 ath_update_survey_stats(sc); 1934 ath_update_survey_stats(sc);
1940 1935
1941 sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ]; 1936 sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
1942 if (sband && idx >= sband->n_channels) { 1937 if (sband && idx >= sband->n_channels) {
1943 idx -= sband->n_channels; 1938 idx -= sband->n_channels;
1944 sband = NULL; 1939 sband = NULL;
1945 } 1940 }
1946 1941
1947 if (!sband) 1942 if (!sband)
1948 sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ]; 1943 sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
1949 1944
1950 if (!sband || idx >= sband->n_channels) { 1945 if (!sband || idx >= sband->n_channels) {
1951 spin_unlock_bh(&common->cc_lock); 1946 spin_unlock_bh(&common->cc_lock);
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index c8d35febaf0f..9272ca90632b 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -985,6 +985,10 @@
985#define AR_SREV_9561(_ah) \ 985#define AR_SREV_9561(_ah) \
986 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9561)) 986 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9561))
987 987
988#define AR_SREV_SOC(_ah) \
989 (AR_SREV_9340(_ah) || AR_SREV_9531(_ah) || AR_SREV_9550(ah) || \
990 AR_SREV_9561(ah))
991
988/* NOTE: When adding chips newer than Peacock, add chip check here */ 992/* NOTE: When adding chips newer than Peacock, add chip check here */
989#define AR_SREV_9580_10_OR_LATER(_ah) \ 993#define AR_SREV_9580_10_OR_LATER(_ah) \
990 (AR_SREV_9580(_ah)) 994 (AR_SREV_9580(_ah))
@@ -1104,14 +1108,46 @@ enum {
1104 1108
1105#define AR_PCIE_PHY_REG3 0x18c08 1109#define AR_PCIE_PHY_REG3 0x18c08
1106 1110
1111/* Define correct GPIO numbers and MASK bits to indicate the WMAC
1112 * GPIO resource.
1113 * Allow SOC chips(AR9340, AR9531, AR9550, AR9561) to access all GPIOs
1114 * which rely on gpiolib framework. But restrict SOC AR9330 only to
1115 * access WMAC GPIO which has the same design with the old chips.
1116 */
1107#define AR_NUM_GPIO 14 1117#define AR_NUM_GPIO 14
1108#define AR928X_NUM_GPIO 10 1118#define AR9280_NUM_GPIO 10
1109#define AR9285_NUM_GPIO 12 1119#define AR9285_NUM_GPIO 12
1110#define AR9287_NUM_GPIO 11 1120#define AR9287_NUM_GPIO 10
1111#define AR9271_NUM_GPIO 16 1121#define AR9271_NUM_GPIO 16
1112#define AR9300_NUM_GPIO 17 1122#define AR9300_NUM_GPIO 16
1123#define AR9330_NUM_GPIO 16
1124#define AR9340_NUM_GPIO 23
1125#define AR9462_NUM_GPIO 10
1126#define AR9485_NUM_GPIO 12
1127#define AR9531_NUM_GPIO 18
1128#define AR9550_NUM_GPIO 24
1129#define AR9561_NUM_GPIO 23
1130#define AR9565_NUM_GPIO 12
1131#define AR9580_NUM_GPIO 16
1113#define AR7010_NUM_GPIO 16 1132#define AR7010_NUM_GPIO 16
1114 1133
1134#define AR_GPIO_MASK 0x00003FFF
1135#define AR9271_GPIO_MASK 0x0000FFFF
1136#define AR9280_GPIO_MASK 0x000003FF
1137#define AR9285_GPIO_MASK 0x00000FFF
1138#define AR9287_GPIO_MASK 0x000003FF
1139#define AR9300_GPIO_MASK 0x0000F4FF
1140#define AR9330_GPIO_MASK 0x0000F4FF
1141#define AR9340_GPIO_MASK 0x0000000F
1142#define AR9462_GPIO_MASK 0x000003FF
1143#define AR9485_GPIO_MASK 0x00000FFF
1144#define AR9531_GPIO_MASK 0x0000000F
1145#define AR9550_GPIO_MASK 0x0000000F
1146#define AR9561_GPIO_MASK 0x0000000F
1147#define AR9565_GPIO_MASK 0x00000FFF
1148#define AR9580_GPIO_MASK 0x0000F4FF
1149#define AR7010_GPIO_MASK 0x0000FFFF
1150
1115#define AR_GPIO_IN_OUT (AR_SREV_9340(ah) ? 0x4028 : 0x4048) 1151#define AR_GPIO_IN_OUT (AR_SREV_9340(ah) ? 0x4028 : 0x4048)
1116#define AR_GPIO_IN_VAL 0x0FFFC000 1152#define AR_GPIO_IN_VAL 0x0FFFC000
1117#define AR_GPIO_IN_VAL_S 14 1153#define AR_GPIO_IN_VAL_S 14
@@ -1132,8 +1168,6 @@ enum {
1132 1168
1133#define AR_GPIO_OE_OUT (AR_SREV_9340(ah) ? 0x4030 : \ 1169#define AR_GPIO_OE_OUT (AR_SREV_9340(ah) ? 0x4030 : \
1134 (AR_SREV_9300_20_OR_LATER(ah) ? 0x4050 : 0x404c)) 1170 (AR_SREV_9300_20_OR_LATER(ah) ? 0x4050 : 0x404c))
1135#define AR_GPIO_OE_OUT_MASK (AR_SREV_9550_OR_LATER(ah) ? \
1136 0x0000000F : 0xFFFFFFFF)
1137#define AR_GPIO_OE_OUT_DRV 0x3 1171#define AR_GPIO_OE_OUT_DRV 0x3
1138#define AR_GPIO_OE_OUT_DRV_NO 0x0 1172#define AR_GPIO_OE_OUT_DRV_NO 0x0
1139#define AR_GPIO_OE_OUT_DRV_LOW 0x1 1173#define AR_GPIO_OE_OUT_DRV_LOW 0x1
@@ -1858,15 +1892,33 @@ enum {
1858 1892
1859#define AR9300_BT_WGHT 0xcccc4444 1893#define AR9300_BT_WGHT 0xcccc4444
1860 1894
1861#define AR_BT_COEX_MODE2 0x817c 1895#define AR_BT_COEX_MODE2 0x817c
1862#define AR_BT_BCN_MISS_THRESH 0x000000ff 1896#define AR_BT_BCN_MISS_THRESH 0x000000ff
1863#define AR_BT_BCN_MISS_THRESH_S 0 1897#define AR_BT_BCN_MISS_THRESH_S 0
1864#define AR_BT_BCN_MISS_CNT 0x0000ff00 1898#define AR_BT_BCN_MISS_CNT 0x0000ff00
1865#define AR_BT_BCN_MISS_CNT_S 8 1899#define AR_BT_BCN_MISS_CNT_S 8
1866#define AR_BT_HOLD_RX_CLEAR 0x00010000 1900#define AR_BT_HOLD_RX_CLEAR 0x00010000
1867#define AR_BT_HOLD_RX_CLEAR_S 16 1901#define AR_BT_HOLD_RX_CLEAR_S 16
1868#define AR_BT_DISABLE_BT_ANT 0x00100000 1902#define AR_BT_PROTECT_BT_AFTER_WAKEUP 0x00080000
1869#define AR_BT_DISABLE_BT_ANT_S 20 1903#define AR_BT_PROTECT_BT_AFTER_WAKEUP_S 19
1904#define AR_BT_DISABLE_BT_ANT 0x00100000
1905#define AR_BT_DISABLE_BT_ANT_S 20
1906#define AR_BT_QUIET_2_WIRE 0x00200000
1907#define AR_BT_QUIET_2_WIRE_S 21
1908#define AR_BT_WL_ACTIVE_MODE 0x00c00000
1909#define AR_BT_WL_ACTIVE_MODE_S 22
1910#define AR_BT_WL_TXRX_SEPARATE 0x01000000
1911#define AR_BT_WL_TXRX_SEPARATE_S 24
1912#define AR_BT_RS_DISCARD_EXTEND 0x02000000
1913#define AR_BT_RS_DISCARD_EXTEND_S 25
1914#define AR_BT_TSF_BT_ACTIVE_CTRL 0x0c000000
1915#define AR_BT_TSF_BT_ACTIVE_CTRL_S 26
1916#define AR_BT_TSF_BT_PRIORITY_CTRL 0x30000000
1917#define AR_BT_TSF_BT_PRIORITY_CTRL_S 28
1918#define AR_BT_INTERRUPT_ENABLE 0x40000000
1919#define AR_BT_INTERRUPT_ENABLE_S 30
1920#define AR_BT_PHY_ERR_BT_COLL_ENABLE 0x80000000
1921#define AR_BT_PHY_ERR_BT_COLL_ENABLE_S 31
1870 1922
1871#define AR_TXSIFS 0x81d0 1923#define AR_TXSIFS 0x81d0
1872#define AR_TXSIFS_TIME 0x000000FF 1924#define AR_TXSIFS_TIME 0x000000FF
@@ -1875,6 +1927,16 @@ enum {
1875#define AR_TXSIFS_ACK_SHIFT 0x00007000 1927#define AR_TXSIFS_ACK_SHIFT 0x00007000
1876#define AR_TXSIFS_ACK_SHIFT_S 12 1928#define AR_TXSIFS_ACK_SHIFT_S 12
1877 1929
1930#define AR_BT_COEX_MODE3 0x81d4
1931#define AR_BT_WL_ACTIVE_TIME 0x000000ff
1932#define AR_BT_WL_ACTIVE_TIME_S 0
1933#define AR_BT_WL_QC_TIME 0x0000ff00
1934#define AR_BT_WL_QC_TIME_S 8
1935#define AR_BT_ALLOW_CONCURRENT_ACCESS 0x000f0000
1936#define AR_BT_ALLOW_CONCURRENT_ACCESS_S 16
1937#define AR_BT_AGC_SATURATION_CNT_ENABLE 0x00100000
1938#define AR_BT_AGC_SATURATION_CNT_ENABLE_S 20
1939
1878#define AR_TXOP_X 0x81ec 1940#define AR_TXOP_X 0x81ec
1879#define AR_TXOP_X_VAL 0x000000FF 1941#define AR_TXOP_X_VAL 0x000000FF
1880 1942
diff --git a/drivers/net/wireless/ath/ath9k/rng.c b/drivers/net/wireless/ath/ath9k/rng.c
index c9cb2aad7b6f..d38e50f96db7 100644
--- a/drivers/net/wireless/ath/ath9k/rng.c
+++ b/drivers/net/wireless/ath/ath9k/rng.c
@@ -55,11 +55,26 @@ static int ath9k_rng_data_read(struct ath_softc *sc, u32 *buf, u32 buf_size)
55 return j << 2; 55 return j << 2;
56} 56}
57 57
58static u32 ath9k_rng_delay_get(u32 fail_stats)
59{
60 u32 delay;
61
62 if (fail_stats < 100)
63 delay = 10;
64 else if (fail_stats < 105)
65 delay = 1000;
66 else
67 delay = 10000;
68
69 return delay;
70}
71
58static int ath9k_rng_kthread(void *data) 72static int ath9k_rng_kthread(void *data)
59{ 73{
60 int bytes_read; 74 int bytes_read;
61 struct ath_softc *sc = data; 75 struct ath_softc *sc = data;
62 u32 *rng_buf; 76 u32 *rng_buf;
77 u32 delay, fail_stats = 0;
63 78
64 rng_buf = kmalloc_array(ATH9K_RNG_BUF_SIZE, sizeof(u32), GFP_KERNEL); 79 rng_buf = kmalloc_array(ATH9K_RNG_BUF_SIZE, sizeof(u32), GFP_KERNEL);
65 if (!rng_buf) 80 if (!rng_buf)
@@ -69,10 +84,13 @@ static int ath9k_rng_kthread(void *data)
69 bytes_read = ath9k_rng_data_read(sc, rng_buf, 84 bytes_read = ath9k_rng_data_read(sc, rng_buf,
70 ATH9K_RNG_BUF_SIZE); 85 ATH9K_RNG_BUF_SIZE);
71 if (unlikely(!bytes_read)) { 86 if (unlikely(!bytes_read)) {
72 msleep_interruptible(10); 87 delay = ath9k_rng_delay_get(++fail_stats);
88 msleep_interruptible(delay);
73 continue; 89 continue;
74 } 90 }
75 91
92 fail_stats = 0;
93
76 /* sleep until entropy bits under write_wakeup_threshold */ 94 /* sleep until entropy bits under write_wakeup_threshold */
77 add_hwgenerator_randomness((void *)rng_buf, bytes_read, 95 add_hwgenerator_randomness((void *)rng_buf, bytes_read,
78 ATH9K_RNG_ENTROPY(bytes_read)); 96 ATH9K_RNG_ENTROPY(bytes_read));
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index fe795fc5288c..8ddd604bd00c 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1112,7 +1112,7 @@ static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf,
1112 bool is_2ghz; 1112 bool is_2ghz;
1113 struct modal_eep_header *pmodal; 1113 struct modal_eep_header *pmodal;
1114 1114
1115 is_2ghz = info->band == IEEE80211_BAND_2GHZ; 1115 is_2ghz = info->band == NL80211_BAND_2GHZ;
1116 pmodal = &eep->modalHeader[is_2ghz]; 1116 pmodal = &eep->modalHeader[is_2ghz];
1117 power_ht40delta = pmodal->ht40PowerIncForPdadc; 1117 power_ht40delta = pmodal->ht40PowerIncForPdadc;
1118 } else { 1118 } else {
@@ -1236,7 +1236,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
1236 1236
1237 /* legacy rates */ 1237 /* legacy rates */
1238 rate = &common->sbands[tx_info->band].bitrates[rates[i].idx]; 1238 rate = &common->sbands[tx_info->band].bitrates[rates[i].idx];
1239 if ((tx_info->band == IEEE80211_BAND_2GHZ) && 1239 if ((tx_info->band == NL80211_BAND_2GHZ) &&
1240 !(rate->flags & IEEE80211_RATE_ERP_G)) 1240 !(rate->flags & IEEE80211_RATE_ERP_G))
1241 phy = WLAN_RC_PHY_CCK; 1241 phy = WLAN_RC_PHY_CCK;
1242 else 1242 else
diff --git a/drivers/net/wireless/ath/carl9170/mac.c b/drivers/net/wireless/ath/carl9170/mac.c
index a2f005703c04..7d4a72dc98db 100644
--- a/drivers/net/wireless/ath/carl9170/mac.c
+++ b/drivers/net/wireless/ath/carl9170/mac.c
@@ -48,7 +48,7 @@ int carl9170_set_dyn_sifs_ack(struct ar9170 *ar)
48 if (conf_is_ht40(&ar->hw->conf)) 48 if (conf_is_ht40(&ar->hw->conf))
49 val = 0x010a; 49 val = 0x010a;
50 else { 50 else {
51 if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) 51 if (ar->hw->conf.chandef.chan->band == NL80211_BAND_2GHZ)
52 val = 0x105; 52 val = 0x105;
53 else 53 else
54 val = 0x104; 54 val = 0x104;
@@ -66,7 +66,7 @@ int carl9170_set_rts_cts_rate(struct ar9170 *ar)
66 rts_rate = 0x1da; 66 rts_rate = 0x1da;
67 cts_rate = 0x10a; 67 cts_rate = 0x10a;
68 } else { 68 } else {
69 if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) { 69 if (ar->hw->conf.chandef.chan->band == NL80211_BAND_2GHZ) {
70 /* 11 mbit CCK */ 70 /* 11 mbit CCK */
71 rts_rate = 033; 71 rts_rate = 033;
72 cts_rate = 003; 72 cts_rate = 003;
@@ -93,7 +93,7 @@ int carl9170_set_slot_time(struct ar9170 *ar)
93 return 0; 93 return 0;
94 } 94 }
95 95
96 if ((ar->hw->conf.chandef.chan->band == IEEE80211_BAND_5GHZ) || 96 if ((ar->hw->conf.chandef.chan->band == NL80211_BAND_5GHZ) ||
97 vif->bss_conf.use_short_slot) 97 vif->bss_conf.use_short_slot)
98 slottime = 9; 98 slottime = 9;
99 99
@@ -120,7 +120,7 @@ int carl9170_set_mac_rates(struct ar9170 *ar)
120 basic |= (vif->bss_conf.basic_rates & 0xff0) << 4; 120 basic |= (vif->bss_conf.basic_rates & 0xff0) << 4;
121 rcu_read_unlock(); 121 rcu_read_unlock();
122 122
123 if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_5GHZ) 123 if (ar->hw->conf.chandef.chan->band == NL80211_BAND_5GHZ)
124 mandatory = 0xff00; /* OFDM 6/9/12/18/24/36/48/54 */ 124 mandatory = 0xff00; /* OFDM 6/9/12/18/24/36/48/54 */
125 else 125 else
126 mandatory = 0xff0f; /* OFDM (6/9../54) + CCK (1/2/5.5/11) */ 126 mandatory = 0xff0f; /* OFDM (6/9../54) + CCK (1/2/5.5/11) */
@@ -512,10 +512,10 @@ int carl9170_set_mac_tpc(struct ar9170 *ar, struct ieee80211_channel *channel)
512 chains = AR9170_TX_PHY_TXCHAIN_1; 512 chains = AR9170_TX_PHY_TXCHAIN_1;
513 513
514 switch (channel->band) { 514 switch (channel->band) {
515 case IEEE80211_BAND_2GHZ: 515 case NL80211_BAND_2GHZ:
516 power = ar->power_2G_ofdm[0] & 0x3f; 516 power = ar->power_2G_ofdm[0] & 0x3f;
517 break; 517 break;
518 case IEEE80211_BAND_5GHZ: 518 case NL80211_BAND_5GHZ:
519 power = ar->power_5G_leg[0] & 0x3f; 519 power = ar->power_5G_leg[0] & 0x3f;
520 break; 520 break;
521 default: 521 default:
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 4d1527a2e292..ffb22a04beeb 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1666,7 +1666,7 @@ static int carl9170_op_get_survey(struct ieee80211_hw *hw, int idx,
1666 return err; 1666 return err;
1667 } 1667 }
1668 1668
1669 for (b = 0; b < IEEE80211_NUM_BANDS; b++) { 1669 for (b = 0; b < NUM_NL80211_BANDS; b++) {
1670 band = ar->hw->wiphy->bands[b]; 1670 band = ar->hw->wiphy->bands[b];
1671 1671
1672 if (!band) 1672 if (!band)
@@ -1941,13 +1941,13 @@ static int carl9170_parse_eeprom(struct ar9170 *ar)
1941 } 1941 }
1942 1942
1943 if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) { 1943 if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) {
1944 ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = 1944 ar->hw->wiphy->bands[NL80211_BAND_2GHZ] =
1945 &carl9170_band_2GHz; 1945 &carl9170_band_2GHz;
1946 chans += carl9170_band_2GHz.n_channels; 1946 chans += carl9170_band_2GHz.n_channels;
1947 bands++; 1947 bands++;
1948 } 1948 }
1949 if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) { 1949 if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) {
1950 ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = 1950 ar->hw->wiphy->bands[NL80211_BAND_5GHZ] =
1951 &carl9170_band_5GHz; 1951 &carl9170_band_5GHz;
1952 chans += carl9170_band_5GHz.n_channels; 1952 chans += carl9170_band_5GHz.n_channels;
1953 bands++; 1953 bands++;
diff --git a/drivers/net/wireless/ath/carl9170/phy.c b/drivers/net/wireless/ath/carl9170/phy.c
index dca6df13fd5b..34d9fd77046e 100644
--- a/drivers/net/wireless/ath/carl9170/phy.c
+++ b/drivers/net/wireless/ath/carl9170/phy.c
@@ -540,11 +540,11 @@ static int carl9170_init_phy_from_eeprom(struct ar9170 *ar,
540 return carl9170_regwrite_result(); 540 return carl9170_regwrite_result();
541} 541}
542 542
543static int carl9170_init_phy(struct ar9170 *ar, enum ieee80211_band band) 543static int carl9170_init_phy(struct ar9170 *ar, enum nl80211_band band)
544{ 544{
545 int i, err; 545 int i, err;
546 u32 val; 546 u32 val;
547 bool is_2ghz = band == IEEE80211_BAND_2GHZ; 547 bool is_2ghz = band == NL80211_BAND_2GHZ;
548 bool is_40mhz = conf_is_ht40(&ar->hw->conf); 548 bool is_40mhz = conf_is_ht40(&ar->hw->conf);
549 549
550 carl9170_regwrite_begin(ar); 550 carl9170_regwrite_begin(ar);
@@ -1125,13 +1125,13 @@ static int carl9170_set_freq_cal_data(struct ar9170 *ar,
1125 u8 f, tmp; 1125 u8 f, tmp;
1126 1126
1127 switch (channel->band) { 1127 switch (channel->band) {
1128 case IEEE80211_BAND_2GHZ: 1128 case NL80211_BAND_2GHZ:
1129 f = channel->center_freq - 2300; 1129 f = channel->center_freq - 2300;
1130 cal_freq_pier = ar->eeprom.cal_freq_pier_2G; 1130 cal_freq_pier = ar->eeprom.cal_freq_pier_2G;
1131 i = AR5416_NUM_2G_CAL_PIERS - 1; 1131 i = AR5416_NUM_2G_CAL_PIERS - 1;
1132 break; 1132 break;
1133 1133
1134 case IEEE80211_BAND_5GHZ: 1134 case NL80211_BAND_5GHZ:
1135 f = (channel->center_freq - 4800) / 5; 1135 f = (channel->center_freq - 4800) / 5;
1136 cal_freq_pier = ar->eeprom.cal_freq_pier_5G; 1136 cal_freq_pier = ar->eeprom.cal_freq_pier_5G;
1137 i = AR5416_NUM_5G_CAL_PIERS - 1; 1137 i = AR5416_NUM_5G_CAL_PIERS - 1;
@@ -1158,12 +1158,12 @@ static int carl9170_set_freq_cal_data(struct ar9170 *ar,
1158 int j; 1158 int j;
1159 1159
1160 switch (channel->band) { 1160 switch (channel->band) {
1161 case IEEE80211_BAND_2GHZ: 1161 case NL80211_BAND_2GHZ:
1162 cal_pier_data = &ar->eeprom. 1162 cal_pier_data = &ar->eeprom.
1163 cal_pier_data_2G[chain][idx]; 1163 cal_pier_data_2G[chain][idx];
1164 break; 1164 break;
1165 1165
1166 case IEEE80211_BAND_5GHZ: 1166 case NL80211_BAND_5GHZ:
1167 cal_pier_data = &ar->eeprom. 1167 cal_pier_data = &ar->eeprom.
1168 cal_pier_data_5G[chain][idx]; 1168 cal_pier_data_5G[chain][idx];
1169 break; 1169 break;
@@ -1340,7 +1340,7 @@ static void carl9170_calc_ctl(struct ar9170 *ar, u32 freq, enum carl9170_bw bw)
1340 /* skip CTL and heavy clip for CTL_MKK and CTL_ETSI */ 1340 /* skip CTL and heavy clip for CTL_MKK and CTL_ETSI */
1341 return; 1341 return;
1342 1342
1343 if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) { 1343 if (ar->hw->conf.chandef.chan->band == NL80211_BAND_2GHZ) {
1344 modes = mode_list_2ghz; 1344 modes = mode_list_2ghz;
1345 nr_modes = ARRAY_SIZE(mode_list_2ghz); 1345 nr_modes = ARRAY_SIZE(mode_list_2ghz);
1346 } else { 1346 } else {
@@ -1607,7 +1607,7 @@ int carl9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
1607 return err; 1607 return err;
1608 1608
1609 err = carl9170_init_rf_banks_0_7(ar, 1609 err = carl9170_init_rf_banks_0_7(ar,
1610 channel->band == IEEE80211_BAND_5GHZ); 1610 channel->band == NL80211_BAND_5GHZ);
1611 if (err) 1611 if (err)
1612 return err; 1612 return err;
1613 1613
@@ -1621,7 +1621,7 @@ int carl9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
1621 return err; 1621 return err;
1622 1622
1623 err = carl9170_init_rf_bank4_pwr(ar, 1623 err = carl9170_init_rf_bank4_pwr(ar,
1624 channel->band == IEEE80211_BAND_5GHZ, 1624 channel->band == NL80211_BAND_5GHZ,
1625 channel->center_freq, bw); 1625 channel->center_freq, bw);
1626 if (err) 1626 if (err)
1627 return err; 1627 return err;
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index d66533cbc38a..0c34c8729dc6 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -417,7 +417,7 @@ static int carl9170_rx_mac_status(struct ar9170 *ar,
417 417
418 return -EINVAL; 418 return -EINVAL;
419 } 419 }
420 if (status->band == IEEE80211_BAND_2GHZ) 420 if (status->band == NL80211_BAND_2GHZ)
421 status->rate_idx += 4; 421 status->rate_idx += 4;
422 break; 422 break;
423 423
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index ae86a600d920..2bf04c9edc98 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -720,12 +720,12 @@ static void carl9170_tx_rate_tpc_chains(struct ar9170 *ar,
720 /* +1 dBm for HT40 */ 720 /* +1 dBm for HT40 */
721 *tpc += 2; 721 *tpc += 2;
722 722
723 if (info->band == IEEE80211_BAND_2GHZ) 723 if (info->band == NL80211_BAND_2GHZ)
724 txpower = ar->power_2G_ht40; 724 txpower = ar->power_2G_ht40;
725 else 725 else
726 txpower = ar->power_5G_ht40; 726 txpower = ar->power_5G_ht40;
727 } else { 727 } else {
728 if (info->band == IEEE80211_BAND_2GHZ) 728 if (info->band == NL80211_BAND_2GHZ)
729 txpower = ar->power_2G_ht20; 729 txpower = ar->power_2G_ht20;
730 else 730 else
731 txpower = ar->power_5G_ht20; 731 txpower = ar->power_5G_ht20;
@@ -734,7 +734,7 @@ static void carl9170_tx_rate_tpc_chains(struct ar9170 *ar,
734 *phyrate = txrate->idx; 734 *phyrate = txrate->idx;
735 *tpc += txpower[idx & 7]; 735 *tpc += txpower[idx & 7];
736 } else { 736 } else {
737 if (info->band == IEEE80211_BAND_2GHZ) { 737 if (info->band == NL80211_BAND_2GHZ) {
738 if (idx < 4) 738 if (idx < 4)
739 txpower = ar->power_2G_cck; 739 txpower = ar->power_2G_cck;
740 else 740 else
@@ -797,7 +797,7 @@ static __le32 carl9170_tx_physet(struct ar9170 *ar,
797 * tmp |= cpu_to_le32(AR9170_TX_PHY_GREENFIELD); 797 * tmp |= cpu_to_le32(AR9170_TX_PHY_GREENFIELD);
798 */ 798 */
799 } else { 799 } else {
800 if (info->band == IEEE80211_BAND_2GHZ) { 800 if (info->band == NL80211_BAND_2GHZ) {
801 if (txrate->idx <= AR9170_TX_PHY_RATE_CCK_11M) 801 if (txrate->idx <= AR9170_TX_PHY_RATE_CCK_11M)
802 tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_CCK); 802 tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_CCK);
803 else 803 else
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 06ea6cc9e30a..7e15ed9ed31f 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -336,12 +336,12 @@ ath_reg_apply_beaconing_flags(struct wiphy *wiphy,
336 struct ath_regulatory *reg, 336 struct ath_regulatory *reg,
337 enum nl80211_reg_initiator initiator) 337 enum nl80211_reg_initiator initiator)
338{ 338{
339 enum ieee80211_band band; 339 enum nl80211_band band;
340 struct ieee80211_supported_band *sband; 340 struct ieee80211_supported_band *sband;
341 struct ieee80211_channel *ch; 341 struct ieee80211_channel *ch;
342 unsigned int i; 342 unsigned int i;
343 343
344 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 344 for (band = 0; band < NUM_NL80211_BANDS; band++) {
345 if (!wiphy->bands[band]) 345 if (!wiphy->bands[band])
346 continue; 346 continue;
347 sband = wiphy->bands[band]; 347 sband = wiphy->bands[band];
@@ -374,7 +374,7 @@ ath_reg_apply_ir_flags(struct wiphy *wiphy,
374{ 374{
375 struct ieee80211_supported_band *sband; 375 struct ieee80211_supported_band *sband;
376 376
377 sband = wiphy->bands[IEEE80211_BAND_2GHZ]; 377 sband = wiphy->bands[NL80211_BAND_2GHZ];
378 if (!sband) 378 if (!sband)
379 return; 379 return;
380 380
@@ -402,10 +402,10 @@ static void ath_reg_apply_radar_flags(struct wiphy *wiphy)
402 struct ieee80211_channel *ch; 402 struct ieee80211_channel *ch;
403 unsigned int i; 403 unsigned int i;
404 404
405 if (!wiphy->bands[IEEE80211_BAND_5GHZ]) 405 if (!wiphy->bands[NL80211_BAND_5GHZ])
406 return; 406 return;
407 407
408 sband = wiphy->bands[IEEE80211_BAND_5GHZ]; 408 sband = wiphy->bands[NL80211_BAND_5GHZ];
409 409
410 for (i = 0; i < sband->n_channels; i++) { 410 for (i = 0; i < sband->n_channels; i++) {
411 ch = &sband->channels[i]; 411 ch = &sband->channels[i];
@@ -772,7 +772,7 @@ ath_regd_init(struct ath_regulatory *reg,
772EXPORT_SYMBOL(ath_regd_init); 772EXPORT_SYMBOL(ath_regd_init);
773 773
774u32 ath_regd_get_band_ctl(struct ath_regulatory *reg, 774u32 ath_regd_get_band_ctl(struct ath_regulatory *reg,
775 enum ieee80211_band band) 775 enum nl80211_band band)
776{ 776{
777 if (!reg->regpair || 777 if (!reg->regpair ||
778 (reg->country_code == CTRY_DEFAULT && 778 (reg->country_code == CTRY_DEFAULT &&
@@ -794,9 +794,9 @@ u32 ath_regd_get_band_ctl(struct ath_regulatory *reg,
794 } 794 }
795 795
796 switch (band) { 796 switch (band) {
797 case IEEE80211_BAND_2GHZ: 797 case NL80211_BAND_2GHZ:
798 return reg->regpair->reg_2ghz_ctl; 798 return reg->regpair->reg_2ghz_ctl;
799 case IEEE80211_BAND_5GHZ: 799 case NL80211_BAND_5GHZ:
800 return reg->regpair->reg_5ghz_ctl; 800 return reg->regpair->reg_5ghz_ctl;
801 default: 801 default:
802 return NO_CTL; 802 return NO_CTL;
diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h
index 37f53bd8fcb1..565d3075f06e 100644
--- a/drivers/net/wireless/ath/regd.h
+++ b/drivers/net/wireless/ath/regd.h
@@ -255,7 +255,7 @@ int ath_regd_init(struct ath_regulatory *reg, struct wiphy *wiphy,
255 void (*reg_notifier)(struct wiphy *wiphy, 255 void (*reg_notifier)(struct wiphy *wiphy,
256 struct regulatory_request *request)); 256 struct regulatory_request *request));
257u32 ath_regd_get_band_ctl(struct ath_regulatory *reg, 257u32 ath_regd_get_band_ctl(struct ath_regulatory *reg,
258 enum ieee80211_band band); 258 enum nl80211_band band);
259void ath_reg_notifier_apply(struct wiphy *wiphy, 259void ath_reg_notifier_apply(struct wiphy *wiphy,
260 struct regulatory_request *request, 260 struct regulatory_request *request,
261 struct ath_regulatory *reg); 261 struct ath_regulatory *reg);
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index a27279c2c695..9a1db3bbec4e 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -26,14 +26,14 @@ module_param_named(debug_mask, wcn36xx_dbg_mask, uint, 0644);
26MODULE_PARM_DESC(debug_mask, "Debugging mask"); 26MODULE_PARM_DESC(debug_mask, "Debugging mask");
27 27
28#define CHAN2G(_freq, _idx) { \ 28#define CHAN2G(_freq, _idx) { \
29 .band = IEEE80211_BAND_2GHZ, \ 29 .band = NL80211_BAND_2GHZ, \
30 .center_freq = (_freq), \ 30 .center_freq = (_freq), \
31 .hw_value = (_idx), \ 31 .hw_value = (_idx), \
32 .max_power = 25, \ 32 .max_power = 25, \
33} 33}
34 34
35#define CHAN5G(_freq, _idx) { \ 35#define CHAN5G(_freq, _idx) { \
36 .band = IEEE80211_BAND_5GHZ, \ 36 .band = NL80211_BAND_5GHZ, \
37 .center_freq = (_freq), \ 37 .center_freq = (_freq), \
38 .hw_value = (_idx), \ 38 .hw_value = (_idx), \
39 .max_power = 25, \ 39 .max_power = 25, \
@@ -516,7 +516,7 @@ static void wcn36xx_sw_scan_complete(struct ieee80211_hw *hw,
516} 516}
517 517
518static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta, 518static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta,
519 enum ieee80211_band band) 519 enum nl80211_band band)
520{ 520{
521 int i, size; 521 int i, size;
522 u16 *rates_table; 522 u16 *rates_table;
@@ -529,7 +529,7 @@ static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta,
529 529
530 size = ARRAY_SIZE(sta_priv->supported_rates.dsss_rates); 530 size = ARRAY_SIZE(sta_priv->supported_rates.dsss_rates);
531 rates_table = sta_priv->supported_rates.dsss_rates; 531 rates_table = sta_priv->supported_rates.dsss_rates;
532 if (band == IEEE80211_BAND_2GHZ) { 532 if (band == NL80211_BAND_2GHZ) {
533 for (i = 0; i < size; i++) { 533 for (i = 0; i < size; i++) {
534 if (rates & 0x01) { 534 if (rates & 0x01) {
535 rates_table[i] = wcn_2ghz_rates[i].hw_value; 535 rates_table[i] = wcn_2ghz_rates[i].hw_value;
@@ -958,8 +958,8 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
958 BIT(NL80211_IFTYPE_ADHOC) | 958 BIT(NL80211_IFTYPE_ADHOC) |
959 BIT(NL80211_IFTYPE_MESH_POINT); 959 BIT(NL80211_IFTYPE_MESH_POINT);
960 960
961 wcn->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wcn_band_2ghz; 961 wcn->hw->wiphy->bands[NL80211_BAND_2GHZ] = &wcn_band_2ghz;
962 wcn->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wcn_band_5ghz; 962 wcn->hw->wiphy->bands[NL80211_BAND_5GHZ] = &wcn_band_5ghz;
963 963
964 wcn->hw->wiphy->cipher_suites = cipher_suites; 964 wcn->hw->wiphy->cipher_suites = cipher_suites;
965 wcn->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); 965 wcn->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index 74f56a81ad9a..96992a2c4b42 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -104,11 +104,11 @@ static void wcn36xx_smd_set_bss_nw_type(struct wcn36xx *wcn,
104 struct ieee80211_sta *sta, 104 struct ieee80211_sta *sta,
105 struct wcn36xx_hal_config_bss_params *bss_params) 105 struct wcn36xx_hal_config_bss_params *bss_params)
106{ 106{
107 if (IEEE80211_BAND_5GHZ == WCN36XX_BAND(wcn)) 107 if (NL80211_BAND_5GHZ == WCN36XX_BAND(wcn))
108 bss_params->nw_type = WCN36XX_HAL_11A_NW_TYPE; 108 bss_params->nw_type = WCN36XX_HAL_11A_NW_TYPE;
109 else if (sta && sta->ht_cap.ht_supported) 109 else if (sta && sta->ht_cap.ht_supported)
110 bss_params->nw_type = WCN36XX_HAL_11N_NW_TYPE; 110 bss_params->nw_type = WCN36XX_HAL_11N_NW_TYPE;
111 else if (sta && (sta->supp_rates[IEEE80211_BAND_2GHZ] & 0x7f)) 111 else if (sta && (sta->supp_rates[NL80211_BAND_2GHZ] & 0x7f))
112 bss_params->nw_type = WCN36XX_HAL_11G_NW_TYPE; 112 bss_params->nw_type = WCN36XX_HAL_11G_NW_TYPE;
113 else 113 else
114 bss_params->nw_type = WCN36XX_HAL_11B_NW_TYPE; 114 bss_params->nw_type = WCN36XX_HAL_11B_NW_TYPE;
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c
index 9bec8237231d..6c47a7336c38 100644
--- a/drivers/net/wireless/ath/wcn36xx/txrx.c
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.c
@@ -57,7 +57,7 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
57 RX_FLAG_MMIC_STRIPPED | 57 RX_FLAG_MMIC_STRIPPED |
58 RX_FLAG_DECRYPTED; 58 RX_FLAG_DECRYPTED;
59 59
60 wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%x\n", status.flag); 60 wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%llx\n", status.flag);
61 61
62 memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); 62 memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
63 63
@@ -225,7 +225,7 @@ static void wcn36xx_set_tx_mgmt(struct wcn36xx_tx_bd *bd,
225 225
226 /* default rate for unicast */ 226 /* default rate for unicast */
227 if (ieee80211_is_mgmt(hdr->frame_control)) 227 if (ieee80211_is_mgmt(hdr->frame_control))
228 bd->bd_rate = (WCN36XX_BAND(wcn) == IEEE80211_BAND_5GHZ) ? 228 bd->bd_rate = (WCN36XX_BAND(wcn) == NL80211_BAND_5GHZ) ?
229 WCN36XX_BD_RATE_CTRL : 229 WCN36XX_BD_RATE_CTRL :
230 WCN36XX_BD_RATE_MGMT; 230 WCN36XX_BD_RATE_MGMT;
231 else if (ieee80211_is_ctl(hdr->frame_control)) 231 else if (ieee80211_is_ctl(hdr->frame_control))
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index fdf63d5fe82b..11b544b26c74 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -18,6 +18,7 @@ wil6210-$(CONFIG_WIL6210_TRACING) += trace.o
18wil6210-y += wil_platform.o 18wil6210-y += wil_platform.o
19wil6210-y += ethtool.o 19wil6210-y += ethtool.o
20wil6210-y += wil_crash_dump.o 20wil6210-y += wil_crash_dump.o
21wil6210-y += p2p.o
21 22
22# for tracing framework to find trace.h 23# for tracing framework to find trace.h
23CFLAGS_trace.o := -I$(src) 24CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 11f1bb8dfebe..0fb3a7941d84 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -18,8 +18,10 @@
18#include "wil6210.h" 18#include "wil6210.h"
19#include "wmi.h" 19#include "wmi.h"
20 20
21#define WIL_MAX_ROC_DURATION_MS 5000
22
21#define CHAN60G(_channel, _flags) { \ 23#define CHAN60G(_channel, _flags) { \
22 .band = IEEE80211_BAND_60GHZ, \ 24 .band = NL80211_BAND_60GHZ, \
23 .center_freq = 56160 + (2160 * (_channel)), \ 25 .center_freq = 56160 + (2160 * (_channel)), \
24 .hw_value = (_channel), \ 26 .hw_value = (_channel), \
25 .flags = (_flags), \ 27 .flags = (_flags), \
@@ -76,12 +78,24 @@ wil_mgmt_stypes[NUM_NL80211_IFTYPES] = {
76 .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | 78 .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
77 BIT(IEEE80211_STYPE_PROBE_REQ >> 4) 79 BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
78 }, 80 },
81 [NL80211_IFTYPE_P2P_DEVICE] = {
82 .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
83 BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
84 .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
85 BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
86 },
79}; 87};
80 88
81static const u32 wil_cipher_suites[] = { 89static const u32 wil_cipher_suites[] = {
82 WLAN_CIPHER_SUITE_GCMP, 90 WLAN_CIPHER_SUITE_GCMP,
83}; 91};
84 92
93static const char * const key_usage_str[] = {
94 [WMI_KEY_USE_PAIRWISE] = "PTK",
95 [WMI_KEY_USE_RX_GROUP] = "RX_GTK",
96 [WMI_KEY_USE_TX_GROUP] = "TX_GTK",
97};
98
85int wil_iftype_nl2wmi(enum nl80211_iftype type) 99int wil_iftype_nl2wmi(enum nl80211_iftype type)
86{ 100{
87 static const struct { 101 static const struct {
@@ -113,7 +127,7 @@ int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid,
113 .interval_usec = 0, 127 .interval_usec = 0,
114 }; 128 };
115 struct { 129 struct {
116 struct wil6210_mbox_hdr_wmi wmi; 130 struct wmi_cmd_hdr wmi;
117 struct wmi_notify_req_done_event evt; 131 struct wmi_notify_req_done_event evt;
118 } __packed reply; 132 } __packed reply;
119 struct wil_net_stats *stats = &wil->sta[cid].stats; 133 struct wil_net_stats *stats = &wil->sta[cid].stats;
@@ -226,13 +240,82 @@ static int wil_cfg80211_dump_station(struct wiphy *wiphy,
226 return rc; 240 return rc;
227} 241}
228 242
243static struct wireless_dev *
244wil_cfg80211_add_iface(struct wiphy *wiphy, const char *name,
245 unsigned char name_assign_type,
246 enum nl80211_iftype type,
247 u32 *flags, struct vif_params *params)
248{
249 struct wil6210_priv *wil = wiphy_to_wil(wiphy);
250 struct net_device *ndev = wil_to_ndev(wil);
251 struct wireless_dev *p2p_wdev;
252
253 wil_dbg_misc(wil, "%s()\n", __func__);
254
255 if (type != NL80211_IFTYPE_P2P_DEVICE) {
256 wil_err(wil, "%s: unsupported iftype %d\n", __func__, type);
257 return ERR_PTR(-EINVAL);
258 }
259
260 if (wil->p2p_wdev) {
261 wil_err(wil, "%s: P2P_DEVICE interface already created\n",
262 __func__);
263 return ERR_PTR(-EINVAL);
264 }
265
266 p2p_wdev = kzalloc(sizeof(*p2p_wdev), GFP_KERNEL);
267 if (!p2p_wdev)
268 return ERR_PTR(-ENOMEM);
269
270 p2p_wdev->iftype = type;
271 p2p_wdev->wiphy = wiphy;
272 /* use our primary ethernet address */
273 ether_addr_copy(p2p_wdev->address, ndev->perm_addr);
274
275 wil->p2p_wdev = p2p_wdev;
276
277 return p2p_wdev;
278}
279
280static int wil_cfg80211_del_iface(struct wiphy *wiphy,
281 struct wireless_dev *wdev)
282{
283 struct wil6210_priv *wil = wiphy_to_wil(wiphy);
284
285 wil_dbg_misc(wil, "%s()\n", __func__);
286
287 if (wdev != wil->p2p_wdev) {
288 wil_err(wil, "%s: delete of incorrect interface 0x%p\n",
289 __func__, wdev);
290 return -EINVAL;
291 }
292
293 wil_p2p_wdev_free(wil);
294
295 return 0;
296}
297
229static int wil_cfg80211_change_iface(struct wiphy *wiphy, 298static int wil_cfg80211_change_iface(struct wiphy *wiphy,
230 struct net_device *ndev, 299 struct net_device *ndev,
231 enum nl80211_iftype type, u32 *flags, 300 enum nl80211_iftype type, u32 *flags,
232 struct vif_params *params) 301 struct vif_params *params)
233{ 302{
234 struct wil6210_priv *wil = wiphy_to_wil(wiphy); 303 struct wil6210_priv *wil = wiphy_to_wil(wiphy);
235 struct wireless_dev *wdev = wil->wdev; 304 struct wireless_dev *wdev = wil_to_wdev(wil);
305 int rc;
306
307 wil_dbg_misc(wil, "%s() type=%d\n", __func__, type);
308
309 if (netif_running(wil_to_ndev(wil)) && !wil_is_recovery_blocked(wil)) {
310 wil_dbg_misc(wil, "interface is up. resetting...\n");
311 mutex_lock(&wil->mutex);
312 __wil_down(wil);
313 rc = __wil_up(wil);
314 mutex_unlock(&wil->mutex);
315
316 if (rc)
317 return rc;
318 }
236 319
237 switch (type) { 320 switch (type) {
238 case NL80211_IFTYPE_STATION: 321 case NL80211_IFTYPE_STATION:
@@ -260,7 +343,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
260 struct cfg80211_scan_request *request) 343 struct cfg80211_scan_request *request)
261{ 344{
262 struct wil6210_priv *wil = wiphy_to_wil(wiphy); 345 struct wil6210_priv *wil = wiphy_to_wil(wiphy);
263 struct wireless_dev *wdev = wil->wdev; 346 struct wireless_dev *wdev = request->wdev;
264 struct { 347 struct {
265 struct wmi_start_scan_cmd cmd; 348 struct wmi_start_scan_cmd cmd;
266 u16 chnl[4]; 349 u16 chnl[4];
@@ -268,6 +351,9 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
268 uint i, n; 351 uint i, n;
269 int rc; 352 int rc;
270 353
354 wil_dbg_misc(wil, "%s(), wdev=0x%p iftype=%d\n",
355 __func__, wdev, wdev->iftype);
356
271 if (wil->scan_request) { 357 if (wil->scan_request) {
272 wil_err(wil, "Already scanning\n"); 358 wil_err(wil, "Already scanning\n");
273 return -EAGAIN; 359 return -EAGAIN;
@@ -277,6 +363,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
277 switch (wdev->iftype) { 363 switch (wdev->iftype) {
278 case NL80211_IFTYPE_STATION: 364 case NL80211_IFTYPE_STATION:
279 case NL80211_IFTYPE_P2P_CLIENT: 365 case NL80211_IFTYPE_P2P_CLIENT:
366 case NL80211_IFTYPE_P2P_DEVICE:
280 break; 367 break;
281 default: 368 default:
282 return -EOPNOTSUPP; 369 return -EOPNOTSUPP;
@@ -288,6 +375,20 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
288 return -EBUSY; 375 return -EBUSY;
289 } 376 }
290 377
378 /* scan on P2P_DEVICE is handled as p2p search */
379 if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE) {
380 wil->scan_request = request;
381 wil->radio_wdev = wdev;
382 rc = wil_p2p_search(wil, request);
383 if (rc) {
384 wil->radio_wdev = wil_to_wdev(wil);
385 wil->scan_request = NULL;
386 }
387 return rc;
388 }
389
390 (void)wil_p2p_stop_discovery(wil);
391
291 wil_dbg_misc(wil, "Start scan_request 0x%p\n", request); 392 wil_dbg_misc(wil, "Start scan_request 0x%p\n", request);
292 wil_dbg_misc(wil, "SSID count: %d", request->n_ssids); 393 wil_dbg_misc(wil, "SSID count: %d", request->n_ssids);
293 394
@@ -313,6 +414,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
313 mod_timer(&wil->scan_timer, jiffies + WIL6210_SCAN_TO); 414 mod_timer(&wil->scan_timer, jiffies + WIL6210_SCAN_TO);
314 415
315 memset(&cmd, 0, sizeof(cmd)); 416 memset(&cmd, 0, sizeof(cmd));
417 cmd.cmd.scan_type = WMI_ACTIVE_SCAN;
316 cmd.cmd.num_channels = 0; 418 cmd.cmd.num_channels = 0;
317 n = min(request->n_channels, 4U); 419 n = min(request->n_channels, 4U);
318 for (i = 0; i < n; i++) { 420 for (i = 0; i < n; i++) {
@@ -340,12 +442,19 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
340 if (rc) 442 if (rc)
341 goto out; 443 goto out;
342 444
445 if (wil->discovery_mode && cmd.cmd.scan_type == WMI_ACTIVE_SCAN) {
446 cmd.cmd.discovery_mode = 1;
447 wil_dbg_misc(wil, "active scan with discovery_mode=1\n");
448 }
449
450 wil->radio_wdev = wdev;
343 rc = wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) + 451 rc = wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) +
344 cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0])); 452 cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0]));
345 453
346out: 454out:
347 if (rc) { 455 if (rc) {
348 del_timer_sync(&wil->scan_timer); 456 del_timer_sync(&wil->scan_timer);
457 wil->radio_wdev = wil_to_wdev(wil);
349 wil->scan_request = NULL; 458 wil->scan_request = NULL;
350 } 459 }
351 460
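Two things are worth calling out in the scan path above. First, as the comment says, a scan request arriving on the P2P_DEVICE interface is turned into a P2P search (wil_p2p_search) rather than a WMI scan, and any leftover discovery is stopped before a regular scan starts. Second, wil->radio_wdev tracks which wireless_dev currently owns the radio: it points at the requesting wdev while the scan or search runs and is restored to the primary wdev (wil_to_wdev(wil)) on every failure path. A hedged one-line restatement of the dispatch rule, not driver code:

    /* illustration only: which scans become a P2P search */
    static bool wil_scan_is_p2p_search(struct wireless_dev *wdev)
    {
            return wdev->iftype == NL80211_IFTYPE_P2P_DEVICE;
    }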
@@ -390,6 +499,7 @@ static void wil_print_connect_params(struct wil6210_priv *wil,
390 print_hex_dump(KERN_INFO, " SSID: ", DUMP_PREFIX_OFFSET, 499 print_hex_dump(KERN_INFO, " SSID: ", DUMP_PREFIX_OFFSET,
391 16, 1, sme->ssid, sme->ssid_len, true); 500 16, 1, sme->ssid, sme->ssid_len, true);
392 wil_info(wil, " Privacy: %s\n", sme->privacy ? "secure" : "open"); 501 wil_info(wil, " Privacy: %s\n", sme->privacy ? "secure" : "open");
502 wil_info(wil, " PBSS: %d\n", sme->pbss);
393 wil_print_crypto(wil, &sme->crypto); 503 wil_print_crypto(wil, &sme->crypto);
394} 504}
395 505
@@ -404,7 +514,9 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
404 const u8 *rsn_eid; 514 const u8 *rsn_eid;
405 int ch; 515 int ch;
406 int rc = 0; 516 int rc = 0;
517 enum ieee80211_bss_type bss_type = IEEE80211_BSS_TYPE_ESS;
407 518
519 wil_dbg_misc(wil, "%s()\n", __func__);
408 wil_print_connect_params(wil, sme); 520 wil_print_connect_params(wil, sme);
409 521
410 if (test_bit(wil_status_fwconnecting, wil->status) || 522 if (test_bit(wil_status_fwconnecting, wil->status) ||
@@ -422,14 +534,12 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
422 if (sme->privacy && !rsn_eid) 534 if (sme->privacy && !rsn_eid)
423 wil_info(wil, "WSC connection\n"); 535 wil_info(wil, "WSC connection\n");
424 536
425 if (sme->pbss) { 537 if (sme->pbss)
426 wil_err(wil, "connect - PBSS not yet supported\n"); 538 bss_type = IEEE80211_BSS_TYPE_PBSS;
427 return -EOPNOTSUPP;
428 }
429 539
430 bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid, 540 bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid,
431 sme->ssid, sme->ssid_len, 541 sme->ssid, sme->ssid_len,
432 IEEE80211_BSS_TYPE_ESS, IEEE80211_PRIVACY_ANY); 542 bss_type, IEEE80211_PRIVACY_ANY);
433 if (!bss) { 543 if (!bss) {
434 wil_err(wil, "Unable to find BSS\n"); 544 wil_err(wil, "Unable to find BSS\n");
435 return -ENOENT; 545 return -ENOENT;
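For context on the PBSS hunks here and in the start_ap path below: a PBSS is the 802.11ad/DMG counterpart of an infrastructure BSS, coordinated by a PCP (PBSS control point) instead of an AP, which is why the connect path can now ask cfg80211_get_bss() for IEEE80211_BSS_TYPE_PBSS instead of rejecting sme->pbss outright.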
@@ -568,10 +678,20 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
568 struct ieee80211_mgmt *mgmt_frame = (void *)buf; 678 struct ieee80211_mgmt *mgmt_frame = (void *)buf;
569 struct wmi_sw_tx_req_cmd *cmd; 679 struct wmi_sw_tx_req_cmd *cmd;
570 struct { 680 struct {
571 struct wil6210_mbox_hdr_wmi wmi; 681 struct wmi_cmd_hdr wmi;
572 struct wmi_sw_tx_complete_event evt; 682 struct wmi_sw_tx_complete_event evt;
573 } __packed evt; 683 } __packed evt;
574 684
685 /* Note, currently we do not support the "wait" parameter, user-space
686 * must call remain_on_channel before mgmt_tx or listen on a channel
687 * another way (AP/PCP or connected station)
688 * in addition we need to check if specified "chan" argument is
689 * different from currently "listened" channel and fail if it is.
690 */
691
692 wil_dbg_misc(wil, "%s()\n", __func__);
693 print_hex_dump_bytes("mgmt tx frame ", DUMP_PREFIX_OFFSET, buf, len);
694
575 cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL); 695 cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL);
576 if (!cmd) { 696 if (!cmd) {
577 rc = -ENOMEM; 697 rc = -ENOMEM;
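The comment added at the top of wil_cfg80211_mgmt_tx() spells out a constraint the code does not yet enforce: the caller must already be listening (via remain_on_channel, AP/PCP, or a connection) and the requested channel must match. A hypothetical helper sketching the listen-channel part of that check, using only the p2p state this patch introduces (the helper itself is not part of the patch):

    static int wil_mgmt_tx_validate_channel(struct wil6210_priv *wil,
                                            struct ieee80211_channel *chan)
    {
            struct wil_p2p_info *p2p = &wil->p2p;

            if (!chan)
                    return 0;       /* no explicit channel requested */
            if (!p2p->discovery_started ||
                chan->hw_value != p2p->listen_chan.hw_value)
                    return -EBUSY;  /* not listening on the requested channel */
            return 0;
    }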
@@ -598,7 +718,7 @@ static int wil_cfg80211_set_channel(struct wiphy *wiphy,
598 struct cfg80211_chan_def *chandef) 718 struct cfg80211_chan_def *chandef)
599{ 719{
600 struct wil6210_priv *wil = wiphy_to_wil(wiphy); 720 struct wil6210_priv *wil = wiphy_to_wil(wiphy);
601 struct wireless_dev *wdev = wil->wdev; 721 struct wireless_dev *wdev = wil_to_wdev(wil);
602 722
603 wdev->preset_chandef = *chandef; 723 wdev->preset_chandef = *chandef;
604 724
@@ -608,22 +728,19 @@ static int wil_cfg80211_set_channel(struct wiphy *wiphy,
608static enum wmi_key_usage wil_detect_key_usage(struct wil6210_priv *wil, 728static enum wmi_key_usage wil_detect_key_usage(struct wil6210_priv *wil,
609 bool pairwise) 729 bool pairwise)
610{ 730{
611 struct wireless_dev *wdev = wil->wdev; 731 struct wireless_dev *wdev = wil_to_wdev(wil);
612 enum wmi_key_usage rc; 732 enum wmi_key_usage rc;
613 static const char * const key_usage_str[] = {
614 [WMI_KEY_USE_PAIRWISE] = "WMI_KEY_USE_PAIRWISE",
615 [WMI_KEY_USE_RX_GROUP] = "WMI_KEY_USE_RX_GROUP",
616 [WMI_KEY_USE_TX_GROUP] = "WMI_KEY_USE_TX_GROUP",
617 };
618 733
619 if (pairwise) { 734 if (pairwise) {
620 rc = WMI_KEY_USE_PAIRWISE; 735 rc = WMI_KEY_USE_PAIRWISE;
621 } else { 736 } else {
622 switch (wdev->iftype) { 737 switch (wdev->iftype) {
623 case NL80211_IFTYPE_STATION: 738 case NL80211_IFTYPE_STATION:
739 case NL80211_IFTYPE_P2P_CLIENT:
624 rc = WMI_KEY_USE_RX_GROUP; 740 rc = WMI_KEY_USE_RX_GROUP;
625 break; 741 break;
626 case NL80211_IFTYPE_AP: 742 case NL80211_IFTYPE_AP:
743 case NL80211_IFTYPE_P2P_GO:
627 rc = WMI_KEY_USE_TX_GROUP; 744 rc = WMI_KEY_USE_TX_GROUP;
628 break; 745 break;
629 default: 746 default:
@@ -638,20 +755,86 @@ static enum wmi_key_usage wil_detect_key_usage(struct wil6210_priv *wil,
638 return rc; 755 return rc;
639} 756}
640 757
758static struct wil_tid_crypto_rx_single *
759wil_find_crypto_ctx(struct wil6210_priv *wil, u8 key_index,
760 enum wmi_key_usage key_usage, const u8 *mac_addr)
761{
762 int cid = -EINVAL;
763 int tid = 0;
764 struct wil_sta_info *s;
765 struct wil_tid_crypto_rx *c;
766
767 if (key_usage == WMI_KEY_USE_TX_GROUP)
768 return NULL; /* not needed */
769
770 /* supplicant provides Rx group key in STA mode with NULL MAC address */
771 if (mac_addr)
772 cid = wil_find_cid(wil, mac_addr);
773 else if (key_usage == WMI_KEY_USE_RX_GROUP)
774 cid = wil_find_cid_by_idx(wil, 0);
775 if (cid < 0) {
776 wil_err(wil, "No CID for %pM %s[%d]\n", mac_addr,
777 key_usage_str[key_usage], key_index);
778 return ERR_PTR(cid);
779 }
780
781 s = &wil->sta[cid];
782 if (key_usage == WMI_KEY_USE_PAIRWISE)
783 c = &s->tid_crypto_rx[tid];
784 else
785 c = &s->group_crypto_rx;
786
787 return &c->key_id[key_index];
788}
789
641static int wil_cfg80211_add_key(struct wiphy *wiphy, 790static int wil_cfg80211_add_key(struct wiphy *wiphy,
642 struct net_device *ndev, 791 struct net_device *ndev,
643 u8 key_index, bool pairwise, 792 u8 key_index, bool pairwise,
644 const u8 *mac_addr, 793 const u8 *mac_addr,
645 struct key_params *params) 794 struct key_params *params)
646{ 795{
796 int rc;
647 struct wil6210_priv *wil = wiphy_to_wil(wiphy); 797 struct wil6210_priv *wil = wiphy_to_wil(wiphy);
648 enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise); 798 enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise);
799 struct wil_tid_crypto_rx_single *cc = wil_find_crypto_ctx(wil,
800 key_index,
801 key_usage,
802 mac_addr);
803
804 wil_dbg_misc(wil, "%s(%pM %s[%d] PN %*phN)\n", __func__,
805 mac_addr, key_usage_str[key_usage], key_index,
806 params->seq_len, params->seq);
807
808 if (IS_ERR(cc)) {
809 wil_err(wil, "Not connected, %s(%pM %s[%d] PN %*phN)\n",
810 __func__, mac_addr, key_usage_str[key_usage], key_index,
811 params->seq_len, params->seq);
812 return -EINVAL;
813 }
649 814
650 wil_dbg_misc(wil, "%s(%pM[%d] %s)\n", __func__, mac_addr, key_index, 815 if (cc)
651 pairwise ? "PTK" : "GTK"); 816 cc->key_set = false;
817
818 if (params->seq && params->seq_len != IEEE80211_GCMP_PN_LEN) {
819 wil_err(wil,
820 "Wrong PN len %d, %s(%pM %s[%d] PN %*phN)\n",
821 params->seq_len, __func__, mac_addr,
822 key_usage_str[key_usage], key_index,
823 params->seq_len, params->seq);
824 return -EINVAL;
825 }
826
827 rc = wmi_add_cipher_key(wil, key_index, mac_addr, params->key_len,
828 params->key, key_usage);
829 if ((rc == 0) && cc) {
830 if (params->seq)
831 memcpy(cc->pn, params->seq, IEEE80211_GCMP_PN_LEN);
832 else
833 memset(cc->pn, 0, IEEE80211_GCMP_PN_LEN);
834 cc->key_set = true;
835 }
652 836
653 return wmi_add_cipher_key(wil, key_index, mac_addr, params->key_len, 837 return rc;
654 params->key, key_usage);
655} 838}
656 839
657static int wil_cfg80211_del_key(struct wiphy *wiphy, 840static int wil_cfg80211_del_key(struct wiphy *wiphy,
@@ -661,9 +844,20 @@ static int wil_cfg80211_del_key(struct wiphy *wiphy,
661{ 844{
662 struct wil6210_priv *wil = wiphy_to_wil(wiphy); 845 struct wil6210_priv *wil = wiphy_to_wil(wiphy);
663 enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise); 846 enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise);
847 struct wil_tid_crypto_rx_single *cc = wil_find_crypto_ctx(wil,
848 key_index,
849 key_usage,
850 mac_addr);
851
852 wil_dbg_misc(wil, "%s(%pM %s[%d])\n", __func__, mac_addr,
853 key_usage_str[key_usage], key_index);
854
855 if (IS_ERR(cc))
856 wil_info(wil, "Not connected, %s(%pM %s[%d])\n", __func__,
857 mac_addr, key_usage_str[key_usage], key_index);
664 858
665 wil_dbg_misc(wil, "%s(%pM[%d] %s)\n", __func__, mac_addr, key_index, 859 if (!IS_ERR_OR_NULL(cc))
666 pairwise ? "PTK" : "GTK"); 860 cc->key_set = false;
667 861
668 return wmi_del_cipher_key(wil, key_index, mac_addr, key_usage); 862 return wmi_del_cipher_key(wil, key_index, mac_addr, key_usage);
669} 863}
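The add_key/del_key changes above keep a per-key GCMP packet number (cc->pn) and a key_set flag so the Rx path can police replays; the new rx_replay counter printed from debugfs.c further down is the visible result. A self-contained sketch of the underlying rule follows; the byte order is an assumption of the sketch, since the driver's PN layout is not shown in this diff:

    /* Treat the 6-byte GCMP PN as a 48-bit counter and require it to
     * strictly increase.  Assumes least-significant byte first.
     */
    static bool pn_replay_detected(const u8 pn_rx[6], const u8 pn_last[6])
    {
            u64 rx = 0, last = 0;
            int i;

            for (i = 5; i >= 0; i--) {
                    rx = (rx << 8) | pn_rx[i];
                    last = (last << 8) | pn_last[i];
            }
            return rx <= last;      /* replayed or reordered frame */
    }

On an accepted frame the stored PN would then be advanced to the received value.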
@@ -674,6 +868,9 @@ static int wil_cfg80211_set_default_key(struct wiphy *wiphy,
674 u8 key_index, bool unicast, 868 u8 key_index, bool unicast,
675 bool multicast) 869 bool multicast)
676{ 870{
871 struct wil6210_priv *wil = wiphy_to_wil(wiphy);
872
873 wil_dbg_misc(wil, "%s: entered\n", __func__);
677 return 0; 874 return 0;
678} 875}
679 876
@@ -686,16 +883,19 @@ static int wil_remain_on_channel(struct wiphy *wiphy,
686 struct wil6210_priv *wil = wiphy_to_wil(wiphy); 883 struct wil6210_priv *wil = wiphy_to_wil(wiphy);
687 int rc; 884 int rc;
688 885
689 /* TODO: handle duration */ 886 wil_dbg_misc(wil, "%s() center_freq=%d, duration=%d iftype=%d\n",
690 wil_info(wil, "%s(%d, %d ms)\n", __func__, chan->center_freq, duration); 887 __func__, chan->center_freq, duration, wdev->iftype);
691 888
692 rc = wmi_set_channel(wil, chan->hw_value); 889 rc = wil_p2p_listen(wil, duration, chan, cookie);
693 if (rc) 890 if (rc)
694 return rc; 891 return rc;
695 892
696 rc = wmi_rxon(wil, true); 893 wil->radio_wdev = wdev;
697 894
698 return rc; 895 cfg80211_ready_on_channel(wdev, *cookie, chan, duration,
896 GFP_KERNEL);
897
898 return 0;
699} 899}
700 900
701static int wil_cancel_remain_on_channel(struct wiphy *wiphy, 901static int wil_cancel_remain_on_channel(struct wiphy *wiphy,
@@ -703,13 +903,10 @@ static int wil_cancel_remain_on_channel(struct wiphy *wiphy,
703 u64 cookie) 903 u64 cookie)
704{ 904{
705 struct wil6210_priv *wil = wiphy_to_wil(wiphy); 905 struct wil6210_priv *wil = wiphy_to_wil(wiphy);
706 int rc;
707
708 wil_info(wil, "%s()\n", __func__);
709 906
710 rc = wmi_rxon(wil, false); 907 wil_dbg_misc(wil, "%s()\n", __func__);
711 908
712 return rc; 909 return wil_p2p_cancel_listen(wil, cookie);
713} 910}
714 911
715/** 912/**
@@ -852,12 +1049,22 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
852 const u8 *ssid, size_t ssid_len, u32 privacy, 1049 const u8 *ssid, size_t ssid_len, u32 privacy,
853 int bi, u8 chan, 1050 int bi, u8 chan,
854 struct cfg80211_beacon_data *bcon, 1051 struct cfg80211_beacon_data *bcon,
855 u8 hidden_ssid) 1052 u8 hidden_ssid, u32 pbss)
856{ 1053{
857 struct wil6210_priv *wil = wiphy_to_wil(wiphy); 1054 struct wil6210_priv *wil = wiphy_to_wil(wiphy);
858 int rc; 1055 int rc;
859 struct wireless_dev *wdev = ndev->ieee80211_ptr; 1056 struct wireless_dev *wdev = ndev->ieee80211_ptr;
860 u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype); 1057 u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
1058 u8 is_go = (wdev->iftype == NL80211_IFTYPE_P2P_GO);
1059
1060 if (pbss)
1061 wmi_nettype = WMI_NETTYPE_P2P;
1062
1063 wil_dbg_misc(wil, "%s: is_go=%d\n", __func__, is_go);
1064 if (is_go && !pbss) {
1065 wil_err(wil, "%s: P2P GO must be in PBSS\n", __func__);
1066 return -ENOTSUPP;
1067 }
861 1068
862 wil_set_recovery_state(wil, fw_recovery_idle); 1069 wil_set_recovery_state(wil, fw_recovery_idle);
863 1070
@@ -879,10 +1086,11 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
879 wil->privacy = privacy; 1086 wil->privacy = privacy;
880 wil->channel = chan; 1087 wil->channel = chan;
881 wil->hidden_ssid = hidden_ssid; 1088 wil->hidden_ssid = hidden_ssid;
1089 wil->pbss = pbss;
882 1090
883 netif_carrier_on(ndev); 1091 netif_carrier_on(ndev);
884 1092
885 rc = wmi_pcp_start(wil, bi, wmi_nettype, chan, hidden_ssid); 1093 rc = wmi_pcp_start(wil, bi, wmi_nettype, chan, hidden_ssid, is_go);
886 if (rc) 1094 if (rc)
887 goto err_pcp_start; 1095 goto err_pcp_start;
888 1096
@@ -928,7 +1136,8 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
928 wdev->ssid_len, privacy, 1136 wdev->ssid_len, privacy,
929 wdev->beacon_interval, 1137 wdev->beacon_interval,
930 wil->channel, bcon, 1138 wil->channel, bcon,
931 wil->hidden_ssid); 1139 wil->hidden_ssid,
1140 wil->pbss);
932 } else { 1141 } else {
933 rc = _wil_cfg80211_set_ies(wiphy, bcon); 1142 rc = _wil_cfg80211_set_ies(wiphy, bcon);
934 } 1143 }
@@ -954,11 +1163,6 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
954 return -EINVAL; 1163 return -EINVAL;
955 } 1164 }
956 1165
957 if (info->pbss) {
958 wil_err(wil, "AP: PBSS not yet supported\n");
959 return -EOPNOTSUPP;
960 }
961
962 switch (info->hidden_ssid) { 1166 switch (info->hidden_ssid) {
963 case NL80211_HIDDEN_SSID_NOT_IN_USE: 1167 case NL80211_HIDDEN_SSID_NOT_IN_USE:
964 hidden_ssid = WMI_HIDDEN_SSID_DISABLED; 1168 hidden_ssid = WMI_HIDDEN_SSID_DISABLED;
@@ -984,6 +1188,7 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
984 info->hidden_ssid); 1188 info->hidden_ssid);
985 wil_dbg_misc(wil, "BI %d DTIM %d\n", info->beacon_interval, 1189 wil_dbg_misc(wil, "BI %d DTIM %d\n", info->beacon_interval,
986 info->dtim_period); 1190 info->dtim_period);
1191 wil_dbg_misc(wil, "PBSS %d\n", info->pbss);
987 print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET, 1192 print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET,
988 info->ssid, info->ssid_len); 1193 info->ssid, info->ssid_len);
989 wil_print_bcon_data(bcon); 1194 wil_print_bcon_data(bcon);
@@ -992,7 +1197,7 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
992 rc = _wil_cfg80211_start_ap(wiphy, ndev, 1197 rc = _wil_cfg80211_start_ap(wiphy, ndev,
993 info->ssid, info->ssid_len, info->privacy, 1198 info->ssid, info->ssid_len, info->privacy,
994 info->beacon_interval, channel->hw_value, 1199 info->beacon_interval, channel->hw_value,
995 bcon, hidden_ssid); 1200 bcon, hidden_ssid, info->pbss);
996 1201
997 return rc; 1202 return rc;
998} 1203}
@@ -1139,7 +1344,26 @@ static int wil_cfg80211_change_bss(struct wiphy *wiphy,
1139 return 0; 1344 return 0;
1140} 1345}
1141 1346
1347static int wil_cfg80211_start_p2p_device(struct wiphy *wiphy,
1348 struct wireless_dev *wdev)
1349{
1350 struct wil6210_priv *wil = wiphy_to_wil(wiphy);
1351
1352 wil_dbg_misc(wil, "%s: entered\n", __func__);
1353 return 0;
1354}
1355
1356static void wil_cfg80211_stop_p2p_device(struct wiphy *wiphy,
1357 struct wireless_dev *wdev)
1358{
1359 struct wil6210_priv *wil = wiphy_to_wil(wiphy);
1360
1361 wil_dbg_misc(wil, "%s: entered\n", __func__);
1362}
1363
1142static struct cfg80211_ops wil_cfg80211_ops = { 1364static struct cfg80211_ops wil_cfg80211_ops = {
1365 .add_virtual_intf = wil_cfg80211_add_iface,
1366 .del_virtual_intf = wil_cfg80211_del_iface,
1143 .scan = wil_cfg80211_scan, 1367 .scan = wil_cfg80211_scan,
1144 .connect = wil_cfg80211_connect, 1368 .connect = wil_cfg80211_connect,
1145 .disconnect = wil_cfg80211_disconnect, 1369 .disconnect = wil_cfg80211_disconnect,
@@ -1160,20 +1384,25 @@ static struct cfg80211_ops wil_cfg80211_ops = {
1160 .del_station = wil_cfg80211_del_station, 1384 .del_station = wil_cfg80211_del_station,
1161 .probe_client = wil_cfg80211_probe_client, 1385 .probe_client = wil_cfg80211_probe_client,
1162 .change_bss = wil_cfg80211_change_bss, 1386 .change_bss = wil_cfg80211_change_bss,
1387 /* P2P device */
1388 .start_p2p_device = wil_cfg80211_start_p2p_device,
1389 .stop_p2p_device = wil_cfg80211_stop_p2p_device,
1163}; 1390};
1164 1391
1165static void wil_wiphy_init(struct wiphy *wiphy) 1392static void wil_wiphy_init(struct wiphy *wiphy)
1166{ 1393{
1167 wiphy->max_scan_ssids = 1; 1394 wiphy->max_scan_ssids = 1;
1168 wiphy->max_scan_ie_len = WMI_MAX_IE_LEN; 1395 wiphy->max_scan_ie_len = WMI_MAX_IE_LEN;
1396 wiphy->max_remain_on_channel_duration = WIL_MAX_ROC_DURATION_MS;
1169 wiphy->max_num_pmkids = 0 /* TODO: */; 1397 wiphy->max_num_pmkids = 0 /* TODO: */;
1170 wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | 1398 wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
1171 BIT(NL80211_IFTYPE_AP) | 1399 BIT(NL80211_IFTYPE_AP) |
1400 BIT(NL80211_IFTYPE_P2P_CLIENT) |
1401 BIT(NL80211_IFTYPE_P2P_GO) |
1402 BIT(NL80211_IFTYPE_P2P_DEVICE) |
1172 BIT(NL80211_IFTYPE_MONITOR); 1403 BIT(NL80211_IFTYPE_MONITOR);
1173 /* TODO: enable P2P when integrated with supplicant:
1174 * BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO)
1175 */
1176 wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME | 1404 wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
1405 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
1177 WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD; 1406 WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
1178 dev_dbg(wiphy_dev(wiphy), "%s : flags = 0x%08x\n", 1407 dev_dbg(wiphy_dev(wiphy), "%s : flags = 0x%08x\n",
1179 __func__, wiphy->flags); 1408 __func__, wiphy->flags);
@@ -1182,7 +1411,7 @@ static void wil_wiphy_init(struct wiphy *wiphy)
1182 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 | 1411 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
1183 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P; 1412 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
1184 1413
1185 wiphy->bands[IEEE80211_BAND_60GHZ] = &wil_band_60ghz; 1414 wiphy->bands[NL80211_BAND_60GHZ] = &wil_band_60ghz;
1186 1415
1187 /* TODO: figure this out */ 1416 /* TODO: figure this out */
1188 wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC; 1417 wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
@@ -1241,3 +1470,18 @@ void wil_wdev_free(struct wil6210_priv *wil)
1241 wiphy_free(wdev->wiphy); 1470 wiphy_free(wdev->wiphy);
1242 kfree(wdev); 1471 kfree(wdev);
1243} 1472}
1473
1474void wil_p2p_wdev_free(struct wil6210_priv *wil)
1475{
1476 struct wireless_dev *p2p_wdev;
1477
1478 mutex_lock(&wil->p2p_wdev_mutex);
1479 p2p_wdev = wil->p2p_wdev;
1480 if (p2p_wdev) {
1481 wil->p2p_wdev = NULL;
1482 wil->radio_wdev = wil_to_wdev(wil);
1483 cfg80211_unregister_wdev(p2p_wdev);
1484 kfree(p2p_wdev);
1485 }
1486 mutex_unlock(&wil->p2p_wdev_mutex);
1487}
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 3bbe73b6d05a..b338a09175ad 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -37,6 +37,7 @@ enum dbg_off_type {
37 doff_x32 = 1, 37 doff_x32 = 1,
38 doff_ulong = 2, 38 doff_ulong = 2,
39 doff_io32 = 3, 39 doff_io32 = 3,
40 doff_u8 = 4
40}; 41};
41 42
42/* offset to "wil" */ 43/* offset to "wil" */
@@ -346,6 +347,10 @@ static void wil6210_debugfs_init_offset(struct wil6210_priv *wil,
346 tbl[i].mode, dbg, 347 tbl[i].mode, dbg,
347 base + tbl[i].off); 348 base + tbl[i].off);
348 break; 349 break;
350 case doff_u8:
351 f = debugfs_create_u8(tbl[i].name, tbl[i].mode, dbg,
352 base + tbl[i].off);
353 break;
349 default: 354 default:
350 f = ERR_PTR(-EINVAL); 355 f = ERR_PTR(-EINVAL);
351 } 356 }
@@ -821,13 +826,13 @@ static ssize_t wil_write_file_wmi(struct file *file, const char __user *buf,
821 size_t len, loff_t *ppos) 826 size_t len, loff_t *ppos)
822{ 827{
823 struct wil6210_priv *wil = file->private_data; 828 struct wil6210_priv *wil = file->private_data;
824 struct wil6210_mbox_hdr_wmi *wmi; 829 struct wmi_cmd_hdr *wmi;
825 void *cmd; 830 void *cmd;
826 int cmdlen = len - sizeof(struct wil6210_mbox_hdr_wmi); 831 int cmdlen = len - sizeof(struct wmi_cmd_hdr);
827 u16 cmdid; 832 u16 cmdid;
828 int rc, rc1; 833 int rc, rc1;
829 834
830 if (cmdlen <= 0) 835 if (cmdlen < 0)
831 return -EINVAL; 836 return -EINVAL;
832 837
833 wmi = kmalloc(len, GFP_KERNEL); 838 wmi = kmalloc(len, GFP_KERNEL);
@@ -840,8 +845,8 @@ static ssize_t wil_write_file_wmi(struct file *file, const char __user *buf,
840 return rc; 845 return rc;
841 } 846 }
842 847
843 cmd = &wmi[1]; 848 cmd = (cmdlen > 0) ? &wmi[1] : NULL;
844 cmdid = le16_to_cpu(wmi->id); 849 cmdid = le16_to_cpu(wmi->command_id);
845 850
846 rc1 = wmi_send(wil, cmdid, cmd, cmdlen); 851 rc1 = wmi_send(wil, cmdid, cmd, cmdlen);
847 kfree(wmi); 852 kfree(wmi);
@@ -985,7 +990,7 @@ static int wil_bf_debugfs_show(struct seq_file *s, void *data)
985 .interval_usec = 0, 990 .interval_usec = 0,
986 }; 991 };
987 struct { 992 struct {
988 struct wil6210_mbox_hdr_wmi wmi; 993 struct wmi_cmd_hdr wmi;
989 struct wmi_notify_req_done_event evt; 994 struct wmi_notify_req_done_event evt;
990 } __packed reply; 995 } __packed reply;
991 996
@@ -1333,6 +1338,34 @@ static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r)
1333 r->ssn_last_drop); 1338 r->ssn_last_drop);
1334} 1339}
1335 1340
1341static void wil_print_rxtid_crypto(struct seq_file *s, int tid,
1342 struct wil_tid_crypto_rx *c)
1343{
1344 int i;
1345
1346 for (i = 0; i < 4; i++) {
1347 struct wil_tid_crypto_rx_single *cc = &c->key_id[i];
1348
1349 if (cc->key_set)
1350 goto has_keys;
1351 }
1352 return;
1353
1354has_keys:
1355 if (tid < WIL_STA_TID_NUM)
1356 seq_printf(s, " [%2d] PN", tid);
1357 else
1358 seq_puts(s, " [GR] PN");
1359
1360 for (i = 0; i < 4; i++) {
1361 struct wil_tid_crypto_rx_single *cc = &c->key_id[i];
1362
1363 seq_printf(s, " [%i%s]%6phN", i, cc->key_set ? "+" : "-",
1364 cc->pn);
1365 }
1366 seq_puts(s, "\n");
1367}
1368
1336static int wil_sta_debugfs_show(struct seq_file *s, void *data) 1369static int wil_sta_debugfs_show(struct seq_file *s, void *data)
1337__acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock) 1370__acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
1338{ 1371{
@@ -1360,18 +1393,25 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
1360 spin_lock_bh(&p->tid_rx_lock); 1393 spin_lock_bh(&p->tid_rx_lock);
1361 for (tid = 0; tid < WIL_STA_TID_NUM; tid++) { 1394 for (tid = 0; tid < WIL_STA_TID_NUM; tid++) {
1362 struct wil_tid_ampdu_rx *r = p->tid_rx[tid]; 1395 struct wil_tid_ampdu_rx *r = p->tid_rx[tid];
1396 struct wil_tid_crypto_rx *c =
1397 &p->tid_crypto_rx[tid];
1363 1398
1364 if (r) { 1399 if (r) {
1365 seq_printf(s, "[%2d] ", tid); 1400 seq_printf(s, " [%2d] ", tid);
1366 wil_print_rxtid(s, r); 1401 wil_print_rxtid(s, r);
1367 } 1402 }
1403
1404 wil_print_rxtid_crypto(s, tid, c);
1368 } 1405 }
1406 wil_print_rxtid_crypto(s, WIL_STA_TID_NUM,
1407 &p->group_crypto_rx);
1369 spin_unlock_bh(&p->tid_rx_lock); 1408 spin_unlock_bh(&p->tid_rx_lock);
1370 seq_printf(s, 1409 seq_printf(s,
1371 "Rx invalid frame: non-data %lu, short %lu, large %lu\n", 1410 "Rx invalid frame: non-data %lu, short %lu, large %lu, replay %lu\n",
1372 p->stats.rx_non_data_frame, 1411 p->stats.rx_non_data_frame,
1373 p->stats.rx_short_frame, 1412 p->stats.rx_short_frame,
1374 p->stats.rx_large_frame); 1413 p->stats.rx_large_frame,
1414 p->stats.rx_replay);
1375 1415
1376 seq_puts(s, "Rx/MCS:"); 1416 seq_puts(s, "Rx/MCS:");
1377 for (mcs = 0; mcs < ARRAY_SIZE(p->stats.rx_per_mcs); 1417 for (mcs = 0; mcs < ARRAY_SIZE(p->stats.rx_per_mcs);
@@ -1487,6 +1527,7 @@ static const struct dbg_off dbg_wil_off[] = {
1487 WIL_FIELD(hw_version, S_IRUGO, doff_x32), 1527 WIL_FIELD(hw_version, S_IRUGO, doff_x32),
1488 WIL_FIELD(recovery_count, S_IRUGO, doff_u32), 1528 WIL_FIELD(recovery_count, S_IRUGO, doff_u32),
1489 WIL_FIELD(ap_isolate, S_IRUGO, doff_u32), 1529 WIL_FIELD(ap_isolate, S_IRUGO, doff_u32),
1530 WIL_FIELD(discovery_mode, S_IRUGO | S_IWUSR, doff_u8),
1490 {}, 1531 {},
1491}; 1532};
1492 1533
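The new doff_u8 case exists for the discovery_mode entry added to dbg_wil_off just above: a root-writable byte that makes active scans set discovery_mode=1 in the WMI start-scan command (see the cfg80211.c hunk earlier). Assuming the driver keeps its usual debugfs layout under the wiphy's directory, it would appear as something like /sys/kernel/debug/ieee80211/phy*/wil6210/discovery_mode; the exact path is an assumption, not something this diff shows.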
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 4f2ffa5c6e17..fe66b2b646f0 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -391,12 +391,14 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
391 wil_dbg_irq(wil, "Thread ISR MISC 0x%08x\n", isr); 391 wil_dbg_irq(wil, "Thread ISR MISC 0x%08x\n", isr);
392 392
393 if (isr & ISR_MISC_FW_ERROR) { 393 if (isr & ISR_MISC_FW_ERROR) {
394 wil->recovery_state = fw_recovery_pending;
394 wil_fw_core_dump(wil); 395 wil_fw_core_dump(wil);
395 wil_notify_fw_error(wil); 396 wil_notify_fw_error(wil);
396 isr &= ~ISR_MISC_FW_ERROR; 397 isr &= ~ISR_MISC_FW_ERROR;
397 if (wil->platform_ops.notify_crash) { 398 if (wil->platform_ops.notify) {
398 wil_err(wil, "notify platform driver about FW crash"); 399 wil_err(wil, "notify platform driver about FW crash");
399 wil->platform_ops.notify_crash(wil->platform_handle); 400 wil->platform_ops.notify(wil->platform_handle,
401 WIL_PLATFORM_EVT_FW_CRASH);
400 } else { 402 } else {
401 wil_fw_error_recovery(wil); 403 wil_fw_error_recovery(wil);
402 } 404 }
diff --git a/drivers/net/wireless/ath/wil6210/ioctl.c b/drivers/net/wireless/ath/wil6210/ioctl.c
index f7f948621951..630380078236 100644
--- a/drivers/net/wireless/ath/wil6210/ioctl.c
+++ b/drivers/net/wireless/ath/wil6210/ioctl.c
@@ -161,13 +161,20 @@ out_free:
161 161
162int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd) 162int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd)
163{ 163{
164 int ret;
165
164 switch (cmd) { 166 switch (cmd) {
165 case WIL_IOCTL_MEMIO: 167 case WIL_IOCTL_MEMIO:
166 return wil_ioc_memio_dword(wil, data); 168 ret = wil_ioc_memio_dword(wil, data);
169 break;
167 case WIL_IOCTL_MEMIO_BLOCK: 170 case WIL_IOCTL_MEMIO_BLOCK:
168 return wil_ioc_memio_block(wil, data); 171 ret = wil_ioc_memio_block(wil, data);
172 break;
169 default: 173 default:
170 wil_dbg_ioctl(wil, "Unsupported IOCTL 0x%04x\n", cmd); 174 wil_dbg_ioctl(wil, "Unsupported IOCTL 0x%04x\n", cmd);
171 return -ENOIOCTLCMD; 175 return -ENOIOCTLCMD;
172 } 176 }
177
178 wil_dbg_ioctl(wil, "ioctl(0x%04x) -> %d\n", cmd, ret);
179 return ret;
173} 180}
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 78ba6e04c944..8d4e8843004e 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -27,6 +27,11 @@ bool debug_fw; /* = false; */
27module_param(debug_fw, bool, S_IRUGO); 27module_param(debug_fw, bool, S_IRUGO);
28MODULE_PARM_DESC(debug_fw, " do not perform card reset. For FW debug"); 28MODULE_PARM_DESC(debug_fw, " do not perform card reset. For FW debug");
29 29
30static bool oob_mode;
31module_param(oob_mode, bool, S_IRUGO);
32MODULE_PARM_DESC(oob_mode,
33 " enable out of the box (OOB) mode in FW, for diagnostics and certification");
34
30bool no_fw_recovery; 35bool no_fw_recovery;
31module_param(no_fw_recovery, bool, S_IRUGO | S_IWUSR); 36module_param(no_fw_recovery, bool, S_IRUGO | S_IWUSR);
32MODULE_PARM_DESC(no_fw_recovery, " disable automatic FW error recovery"); 37MODULE_PARM_DESC(no_fw_recovery, " disable automatic FW error recovery");
@@ -149,7 +154,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
149 might_sleep(); 154 might_sleep();
150 wil_dbg_misc(wil, "%s(CID %d, status %d)\n", __func__, cid, 155 wil_dbg_misc(wil, "%s(CID %d, status %d)\n", __func__, cid,
151 sta->status); 156 sta->status);
152 157 /* inform upper/lower layers */
153 if (sta->status != wil_sta_unused) { 158 if (sta->status != wil_sta_unused) {
154 if (!from_event) 159 if (!from_event)
155 wmi_disconnect_sta(wil, sta->addr, reason_code, true); 160 wmi_disconnect_sta(wil, sta->addr, reason_code, true);
@@ -165,7 +170,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
165 } 170 }
166 sta->status = wil_sta_unused; 171 sta->status = wil_sta_unused;
167 } 172 }
168 173 /* reorder buffers */
169 for (i = 0; i < WIL_STA_TID_NUM; i++) { 174 for (i = 0; i < WIL_STA_TID_NUM; i++) {
170 struct wil_tid_ampdu_rx *r; 175 struct wil_tid_ampdu_rx *r;
171 176
@@ -177,10 +182,15 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
177 182
178 spin_unlock_bh(&sta->tid_rx_lock); 183 spin_unlock_bh(&sta->tid_rx_lock);
179 } 184 }
185 /* crypto context */
186 memset(sta->tid_crypto_rx, 0, sizeof(sta->tid_crypto_rx));
187 memset(&sta->group_crypto_rx, 0, sizeof(sta->group_crypto_rx));
188 /* release vrings */
180 for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) { 189 for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
181 if (wil->vring2cid_tid[i][0] == cid) 190 if (wil->vring2cid_tid[i][0] == cid)
182 wil_vring_fini_tx(wil, i); 191 wil_vring_fini_tx(wil, i);
183 } 192 }
193 /* statistics */
184 memset(&sta->stats, 0, sizeof(sta->stats)); 194 memset(&sta->stats, 0, sizeof(sta->stats));
185} 195}
186 196
@@ -300,6 +310,11 @@ void wil_set_recovery_state(struct wil6210_priv *wil, int state)
300 wake_up_interruptible(&wil->wq); 310 wake_up_interruptible(&wil->wq);
301} 311}
302 312
313bool wil_is_recovery_blocked(struct wil6210_priv *wil)
314{
315 return no_fw_recovery && (wil->recovery_state == fw_recovery_pending);
316}
317
303static void wil_fw_error_worker(struct work_struct *work) 318static void wil_fw_error_worker(struct work_struct *work)
304{ 319{
305 struct wil6210_priv *wil = container_of(work, struct wil6210_priv, 320 struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
@@ -440,9 +455,8 @@ int wil_priv_init(struct wil6210_priv *wil)
440 455
441 mutex_init(&wil->mutex); 456 mutex_init(&wil->mutex);
442 mutex_init(&wil->wmi_mutex); 457 mutex_init(&wil->wmi_mutex);
443 mutex_init(&wil->back_rx_mutex);
444 mutex_init(&wil->back_tx_mutex);
445 mutex_init(&wil->probe_client_mutex); 458 mutex_init(&wil->probe_client_mutex);
459 mutex_init(&wil->p2p_wdev_mutex);
446 460
447 init_completion(&wil->wmi_ready); 461 init_completion(&wil->wmi_ready);
448 init_completion(&wil->wmi_call); 462 init_completion(&wil->wmi_call);
@@ -450,17 +464,15 @@ int wil_priv_init(struct wil6210_priv *wil)
450 wil->bcast_vring = -1; 464 wil->bcast_vring = -1;
451 setup_timer(&wil->connect_timer, wil_connect_timer_fn, (ulong)wil); 465 setup_timer(&wil->connect_timer, wil_connect_timer_fn, (ulong)wil);
452 setup_timer(&wil->scan_timer, wil_scan_timer_fn, (ulong)wil); 466 setup_timer(&wil->scan_timer, wil_scan_timer_fn, (ulong)wil);
467 setup_timer(&wil->p2p.discovery_timer, wil_p2p_discovery_timer_fn,
468 (ulong)wil);
453 469
454 INIT_WORK(&wil->disconnect_worker, wil_disconnect_worker); 470 INIT_WORK(&wil->disconnect_worker, wil_disconnect_worker);
455 INIT_WORK(&wil->wmi_event_worker, wmi_event_worker); 471 INIT_WORK(&wil->wmi_event_worker, wmi_event_worker);
456 INIT_WORK(&wil->fw_error_worker, wil_fw_error_worker); 472 INIT_WORK(&wil->fw_error_worker, wil_fw_error_worker);
457 INIT_WORK(&wil->back_rx_worker, wil_back_rx_worker);
458 INIT_WORK(&wil->back_tx_worker, wil_back_tx_worker);
459 INIT_WORK(&wil->probe_client_worker, wil_probe_client_worker); 473 INIT_WORK(&wil->probe_client_worker, wil_probe_client_worker);
460 474
461 INIT_LIST_HEAD(&wil->pending_wmi_ev); 475 INIT_LIST_HEAD(&wil->pending_wmi_ev);
462 INIT_LIST_HEAD(&wil->back_rx_pending);
463 INIT_LIST_HEAD(&wil->back_tx_pending);
464 INIT_LIST_HEAD(&wil->probe_client_pending); 476 INIT_LIST_HEAD(&wil->probe_client_pending);
465 spin_lock_init(&wil->wmi_ev_lock); 477 spin_lock_init(&wil->wmi_ev_lock);
466 init_waitqueue_head(&wil->wq); 478 init_waitqueue_head(&wil->wq);
@@ -514,16 +526,14 @@ void wil_priv_deinit(struct wil6210_priv *wil)
514 526
515 wil_set_recovery_state(wil, fw_recovery_idle); 527 wil_set_recovery_state(wil, fw_recovery_idle);
516 del_timer_sync(&wil->scan_timer); 528 del_timer_sync(&wil->scan_timer);
529 del_timer_sync(&wil->p2p.discovery_timer);
517 cancel_work_sync(&wil->disconnect_worker); 530 cancel_work_sync(&wil->disconnect_worker);
518 cancel_work_sync(&wil->fw_error_worker); 531 cancel_work_sync(&wil->fw_error_worker);
532 cancel_work_sync(&wil->p2p.discovery_expired_work);
519 mutex_lock(&wil->mutex); 533 mutex_lock(&wil->mutex);
520 wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false); 534 wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
521 mutex_unlock(&wil->mutex); 535 mutex_unlock(&wil->mutex);
522 wmi_event_flush(wil); 536 wmi_event_flush(wil);
523 wil_back_rx_flush(wil);
524 cancel_work_sync(&wil->back_rx_worker);
525 wil_back_tx_flush(wil);
526 cancel_work_sync(&wil->back_tx_worker);
527 wil_probe_client_flush(wil); 537 wil_probe_client_flush(wil);
528 cancel_work_sync(&wil->probe_client_worker); 538 cancel_work_sync(&wil->probe_client_worker);
529 destroy_workqueue(wil->wq_service); 539 destroy_workqueue(wil->wq_service);
@@ -542,6 +552,16 @@ static inline void wil_release_cpu(struct wil6210_priv *wil)
542 wil_w(wil, RGF_USER_USER_CPU_0, 1); 552 wil_w(wil, RGF_USER_USER_CPU_0, 1);
543} 553}
544 554
555static void wil_set_oob_mode(struct wil6210_priv *wil, bool enable)
556{
557 wil_info(wil, "%s: enable=%d\n", __func__, enable);
558 if (enable) {
559 wil_s(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE);
560 } else {
561 wil_c(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE);
562 }
563}
564
545static int wil_target_reset(struct wil6210_priv *wil) 565static int wil_target_reset(struct wil6210_priv *wil)
546{ 566{
547 int delay = 0; 567 int delay = 0;
@@ -637,6 +657,7 @@ void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
637static int wil_get_bl_info(struct wil6210_priv *wil) 657static int wil_get_bl_info(struct wil6210_priv *wil)
638{ 658{
639 struct net_device *ndev = wil_to_ndev(wil); 659 struct net_device *ndev = wil_to_ndev(wil);
660 struct wiphy *wiphy = wil_to_wiphy(wil);
640 union { 661 union {
641 struct bl_dedicated_registers_v0 bl0; 662 struct bl_dedicated_registers_v0 bl0;
642 struct bl_dedicated_registers_v1 bl1; 663 struct bl_dedicated_registers_v1 bl1;
@@ -681,6 +702,7 @@ static int wil_get_bl_info(struct wil6210_priv *wil)
681 } 702 }
682 703
683 ether_addr_copy(ndev->perm_addr, mac); 704 ether_addr_copy(ndev->perm_addr, mac);
705 ether_addr_copy(wiphy->perm_addr, mac);
684 if (!is_valid_ether_addr(ndev->dev_addr)) 706 if (!is_valid_ether_addr(ndev->dev_addr))
685 ether_addr_copy(ndev->dev_addr, mac); 707 ether_addr_copy(ndev->dev_addr, mac);
686 708
@@ -767,6 +789,15 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
767 if (wil->hw_version == HW_VER_UNKNOWN) 789 if (wil->hw_version == HW_VER_UNKNOWN)
768 return -ENODEV; 790 return -ENODEV;
769 791
792 if (wil->platform_ops.notify) {
793 rc = wil->platform_ops.notify(wil->platform_handle,
794 WIL_PLATFORM_EVT_PRE_RESET);
795 if (rc)
796 wil_err(wil,
797 "%s: PRE_RESET platform notify failed, rc %d\n",
798 __func__, rc);
799 }
800
770 set_bit(wil_status_resetting, wil->status); 801 set_bit(wil_status_resetting, wil->status);
771 802
772 cancel_work_sync(&wil->disconnect_worker); 803 cancel_work_sync(&wil->disconnect_worker);
@@ -807,6 +838,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
807 if (rc) 838 if (rc)
808 return rc; 839 return rc;
809 840
841 wil_set_oob_mode(wil, oob_mode);
810 if (load_fw) { 842 if (load_fw) {
811 wil_info(wil, "Use firmware <%s> + board <%s>\n", WIL_FW_NAME, 843 wil_info(wil, "Use firmware <%s> + board <%s>\n", WIL_FW_NAME,
812 WIL_FW2_NAME); 844 WIL_FW2_NAME);
@@ -846,8 +878,27 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
846 878
847 /* we just started MAC, wait for FW ready */ 879 /* we just started MAC, wait for FW ready */
848 rc = wil_wait_for_fw_ready(wil); 880 rc = wil_wait_for_fw_ready(wil);
849 if (rc == 0) /* check FW is responsive */ 881 if (rc)
850 rc = wmi_echo(wil); 882 return rc;
883
884 /* check FW is responsive */
885 rc = wmi_echo(wil);
886 if (rc) {
887 wil_err(wil, "%s: wmi_echo failed, rc %d\n",
888 __func__, rc);
889 return rc;
890 }
891
892 if (wil->platform_ops.notify) {
893 rc = wil->platform_ops.notify(wil->platform_handle,
894 WIL_PLATFORM_EVT_FW_RDY);
895 if (rc) {
896 wil_err(wil,
897 "%s: FW_RDY notify failed, rc %d\n",
898 __func__, rc);
899 rc = 0;
900 }
901 }
851 } 902 }
852 903
853 return rc; 904 return rc;
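Taken together with the interrupt.c hunk earlier, the reset path now reports three events through a single platform_ops.notify() hook (FW_CRASH, PRE_RESET, FW_RDY) instead of the old notify_crash callback. A hedged sketch of what such a hook might look like; the real prototype lives in wil_platform.h, which is not part of these hunks, so the handle and event types here are assumptions:

    static int example_platform_notify(void *handle, int evt)
    {
            switch (evt) {
            case WIL_PLATFORM_EVT_FW_CRASH:
                    /* collect platform-side crash state */
                    break;
            case WIL_PLATFORM_EVT_PRE_RESET:
                    /* quiesce shared resources before the card reset */
                    break;
            case WIL_PLATFORM_EVT_FW_RDY:
                    /* firmware is up; resume normal operation */
                    break;
            default:
                    break;
            }
            return 0;
    }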
@@ -954,6 +1005,8 @@ int __wil_down(struct wil6210_priv *wil)
954 } 1005 }
955 wil_enable_irq(wil); 1006 wil_enable_irq(wil);
956 1007
1008 (void)wil_p2p_stop_discovery(wil);
1009
957 if (wil->scan_request) { 1010 if (wil->scan_request) {
958 wil_dbg_misc(wil, "Abort scan_request 0x%p\n", 1011 wil_dbg_misc(wil, "Abort scan_request 0x%p\n",
959 wil->scan_request); 1012 wil->scan_request);
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index ecc3c1bdae4b..098409753d5b 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -60,11 +60,7 @@ static int wil_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
60{ 60{
61 struct wil6210_priv *wil = ndev_to_wil(ndev); 61 struct wil6210_priv *wil = ndev_to_wil(ndev);
62 62
63 int ret = wil_ioctl(wil, ifr->ifr_data, cmd); 63 return wil_ioctl(wil, ifr->ifr_data, cmd);
64
65 wil_dbg_misc(wil, "ioctl(0x%04x) -> %d\n", cmd, ret);
66
67 return ret;
68} 64}
69 65
70static const struct net_device_ops wil_netdev_ops = { 66static const struct net_device_ops wil_netdev_ops = {
@@ -149,6 +145,7 @@ void *wil_if_alloc(struct device *dev)
149 145
150 wil = wdev_to_wil(wdev); 146 wil = wdev_to_wil(wdev);
151 wil->wdev = wdev; 147 wil->wdev = wdev;
148 wil->radio_wdev = wdev;
152 149
153 wil_dbg_misc(wil, "%s()\n", __func__); 150 wil_dbg_misc(wil, "%s()\n", __func__);
154 151
@@ -160,7 +157,7 @@ void *wil_if_alloc(struct device *dev)
160 157
161 wdev->iftype = NL80211_IFTYPE_STATION; /* TODO */ 158 wdev->iftype = NL80211_IFTYPE_STATION; /* TODO */
162 /* default monitor channel */ 159 /* default monitor channel */
163 ch = wdev->wiphy->bands[IEEE80211_BAND_60GHZ]->channels; 160 ch = wdev->wiphy->bands[NL80211_BAND_60GHZ]->channels;
164 cfg80211_chandef_create(&wdev->preset_chandef, ch, NL80211_CHAN_NO_HT); 161 cfg80211_chandef_create(&wdev->preset_chandef, ch, NL80211_CHAN_NO_HT);
165 162
166 ndev = alloc_netdev(0, "wlan%d", NET_NAME_UNKNOWN, wil_dev_setup); 163 ndev = alloc_netdev(0, "wlan%d", NET_NAME_UNKNOWN, wil_dev_setup);
diff --git a/drivers/net/wireless/ath/wil6210/p2p.c b/drivers/net/wireless/ath/wil6210/p2p.c
new file mode 100644
index 000000000000..2c1b8958180e
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/p2p.c
@@ -0,0 +1,253 @@
1/*
2 * Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "wil6210.h"
18#include "wmi.h"
19
20#define P2P_WILDCARD_SSID "DIRECT-"
21#define P2P_DMG_SOCIAL_CHANNEL 2
22#define P2P_SEARCH_DURATION_MS 500
23#define P2P_DEFAULT_BI 100
24
25void wil_p2p_discovery_timer_fn(ulong x)
26{
27 struct wil6210_priv *wil = (void *)x;
28
29 wil_dbg_misc(wil, "%s\n", __func__);
30
31 schedule_work(&wil->p2p.discovery_expired_work);
32}
33
34int wil_p2p_search(struct wil6210_priv *wil,
35 struct cfg80211_scan_request *request)
36{
37 int rc;
38 struct wil_p2p_info *p2p = &wil->p2p;
39
40 wil_dbg_misc(wil, "%s: channel %d\n",
41 __func__, P2P_DMG_SOCIAL_CHANNEL);
42
43 mutex_lock(&wil->mutex);
44
45 if (p2p->discovery_started) {
46 wil_err(wil, "%s: search failed. discovery already ongoing\n",
47 __func__);
48 rc = -EBUSY;
49 goto out;
50 }
51
52 rc = wmi_p2p_cfg(wil, P2P_DMG_SOCIAL_CHANNEL, P2P_DEFAULT_BI);
53 if (rc) {
54 wil_err(wil, "%s: wmi_p2p_cfg failed\n", __func__);
55 goto out;
56 }
57
58 rc = wmi_set_ssid(wil, strlen(P2P_WILDCARD_SSID), P2P_WILDCARD_SSID);
59 if (rc) {
60 wil_err(wil, "%s: wmi_set_ssid failed\n", __func__);
61 goto out_stop;
62 }
63
64 /* Set application IE to probe request and probe response */
65 rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ,
66 request->ie_len, request->ie);
67 if (rc) {
68 wil_err(wil, "%s: wmi_set_ie(WMI_FRAME_PROBE_REQ) failed\n",
69 __func__);
70 goto out_stop;
71 }
72
73 /* supplicant doesn't provide Probe Response IEs. As a workaround -
74 * re-use Probe Request IEs
75 */
76 rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP,
77 request->ie_len, request->ie);
78 if (rc) {
79 wil_err(wil, "%s: wmi_set_ie(WMI_FRAME_PROBE_RESP) failed\n",
80 __func__);
81 goto out_stop;
82 }
83
84 rc = wmi_start_search(wil);
85 if (rc) {
86 wil_err(wil, "%s: wmi_start_search failed\n", __func__);
87 goto out_stop;
88 }
89
90 p2p->discovery_started = 1;
91 INIT_WORK(&p2p->discovery_expired_work, wil_p2p_search_expired);
92 mod_timer(&p2p->discovery_timer,
93 jiffies + msecs_to_jiffies(P2P_SEARCH_DURATION_MS));
94
95out_stop:
96 if (rc)
97 wmi_stop_discovery(wil);
98
99out:
100 mutex_unlock(&wil->mutex);
101 return rc;
102}
103
104int wil_p2p_listen(struct wil6210_priv *wil, unsigned int duration,
105 struct ieee80211_channel *chan, u64 *cookie)
106{
107 struct wil_p2p_info *p2p = &wil->p2p;
108 u8 channel = P2P_DMG_SOCIAL_CHANNEL;
109 int rc;
110
111 if (chan)
112 channel = chan->hw_value;
113
114 wil_dbg_misc(wil, "%s: duration %d\n", __func__, duration);
115
116 mutex_lock(&wil->mutex);
117
118 if (p2p->discovery_started) {
119 wil_err(wil, "%s: discovery already ongoing\n", __func__);
120 rc = -EBUSY;
121 goto out;
122 }
123
124 rc = wmi_p2p_cfg(wil, channel, P2P_DEFAULT_BI);
125 if (rc) {
126 wil_err(wil, "%s: wmi_p2p_cfg failed\n", __func__);
127 goto out;
128 }
129
130 rc = wmi_set_ssid(wil, strlen(P2P_WILDCARD_SSID), P2P_WILDCARD_SSID);
131 if (rc) {
132 wil_err(wil, "%s: wmi_set_ssid failed\n", __func__);
133 goto out_stop;
134 }
135
136 rc = wmi_start_listen(wil);
137 if (rc) {
138 wil_err(wil, "%s: wmi_start_listen failed\n", __func__);
139 goto out_stop;
140 }
141
142 memcpy(&p2p->listen_chan, chan, sizeof(*chan));
143 *cookie = ++p2p->cookie;
144
145 p2p->discovery_started = 1;
146 INIT_WORK(&p2p->discovery_expired_work, wil_p2p_listen_expired);
147 mod_timer(&p2p->discovery_timer,
148 jiffies + msecs_to_jiffies(duration));
149
150out_stop:
151 if (rc)
152 wmi_stop_discovery(wil);
153
154out:
155 mutex_unlock(&wil->mutex);
156 return rc;
157}
158
159u8 wil_p2p_stop_discovery(struct wil6210_priv *wil)
160{
161 struct wil_p2p_info *p2p = &wil->p2p;
162 u8 started = p2p->discovery_started;
163
164 if (p2p->discovery_started) {
165 del_timer_sync(&p2p->discovery_timer);
166 p2p->discovery_started = 0;
167 wmi_stop_discovery(wil);
168 }
169
170 return started;
171}
172
173int wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie)
174{
175 struct wil_p2p_info *p2p = &wil->p2p;
176 u8 started;
177
178 mutex_lock(&wil->mutex);
179
180 if (cookie != p2p->cookie) {
181 wil_info(wil, "%s: Cookie mismatch: 0x%016llx vs. 0x%016llx\n",
182 __func__, p2p->cookie, cookie);
183 mutex_unlock(&wil->mutex);
184 return -ENOENT;
185 }
186
187 started = wil_p2p_stop_discovery(wil);
188
189 mutex_unlock(&wil->mutex);
190
191 if (!started) {
192 wil_err(wil, "%s: listen not started\n", __func__);
193 return -ENOENT;
194 }
195
196 mutex_lock(&wil->p2p_wdev_mutex);
197 cfg80211_remain_on_channel_expired(wil->radio_wdev,
198 p2p->cookie,
199 &p2p->listen_chan,
200 GFP_KERNEL);
201 wil->radio_wdev = wil->wdev;
202 mutex_unlock(&wil->p2p_wdev_mutex);
203 return 0;
204}
205
206void wil_p2p_listen_expired(struct work_struct *work)
207{
208 struct wil_p2p_info *p2p = container_of(work,
209 struct wil_p2p_info, discovery_expired_work);
210 struct wil6210_priv *wil = container_of(p2p,
211 struct wil6210_priv, p2p);
212 u8 started;
213
214 wil_dbg_misc(wil, "%s()\n", __func__);
215
216 mutex_lock(&wil->mutex);
217 started = wil_p2p_stop_discovery(wil);
218 mutex_unlock(&wil->mutex);
219
220 if (started) {
221 mutex_lock(&wil->p2p_wdev_mutex);
222 cfg80211_remain_on_channel_expired(wil->radio_wdev,
223 p2p->cookie,
224 &p2p->listen_chan,
225 GFP_KERNEL);
226 wil->radio_wdev = wil->wdev;
227 mutex_unlock(&wil->p2p_wdev_mutex);
228 }
229
230}
231
232void wil_p2p_search_expired(struct work_struct *work)
233{
234 struct wil_p2p_info *p2p = container_of(work,
235 struct wil_p2p_info, discovery_expired_work);
236 struct wil6210_priv *wil = container_of(p2p,
237 struct wil6210_priv, p2p);
238 u8 started;
239
240 wil_dbg_misc(wil, "%s()\n", __func__);
241
242 mutex_lock(&wil->mutex);
243 started = wil_p2p_stop_discovery(wil);
244 mutex_unlock(&wil->mutex);
245
246 if (started) {
247 mutex_lock(&wil->p2p_wdev_mutex);
248 cfg80211_scan_done(wil->scan_request, 0);
249 wil->scan_request = NULL;
250 wil->radio_wdev = wil->wdev;
251 mutex_unlock(&wil->p2p_wdev_mutex);
252 }
253}
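The listen and search paths above share one shape: configure P2P, program the wildcard SSID and IEs, start the firmware operation, then arm discovery_timer for the requested duration; any failure after wmi_p2p_cfg() unwinds through out_stop so the firmware is told to stop discovery. Below is a minimal standalone sketch of that goto-based unwind (not driver code); fake_cfg(), fake_set_ssid(), fake_listen() and fake_stop() are invented stand-ins for the WMI calls.

#include <stdio.h>

static int cfg_ok = 1, ssid_ok = 1, listen_ok = 1;	/* toggle to exercise the error paths */

static int fake_cfg(void)      { return cfg_ok    ? 0 : -1; }
static int fake_set_ssid(void) { return ssid_ok   ? 0 : -1; }
static int fake_listen(void)   { return listen_ok ? 0 : -1; }
static void fake_stop(void)    { printf("rollback: stop discovery\n"); }

static int start_listen(void)
{
	int rc;

	rc = fake_cfg();
	if (rc)
		goto out;	/* nothing started yet, nothing to undo */

	rc = fake_set_ssid();
	if (rc)
		goto out_stop;	/* P2P already configured, must stop it */

	rc = fake_listen();
	if (rc)
		goto out_stop;

	printf("discovery started\n");

out_stop:
	if (rc)
		fake_stop();
out:
	return rc;
}

int main(void)
{
	return start_listen() ? 1 : 0;
}

The success path deliberately falls through out_stop with rc == 0, so the rollback runs only when one of the later steps failed.
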
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index e36f2a0c8cb6..aeb72c438e44 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -275,6 +275,7 @@ static void wil_pcie_remove(struct pci_dev *pdev)
275 pci_disable_device(pdev); 275 pci_disable_device(pdev);
276 if (wil->platform_ops.uninit) 276 if (wil->platform_ops.uninit)
277 wil->platform_ops.uninit(wil->platform_handle); 277 wil->platform_ops.uninit(wil->platform_handle);
278 wil_p2p_wdev_free(wil);
278 wil_if_free(wil); 279 wil_if_free(wil);
279} 280}
280 281
diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c
index 32031e7a11d5..19ed127d4d05 100644
--- a/drivers/net/wireless/ath/wil6210/rx_reorder.c
+++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2014-2015 Qualcomm Atheros, Inc. 2 * Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -291,35 +291,15 @@ static u16 wil_agg_size(struct wil6210_priv *wil, u16 req_agg_wsize)
291 return min(max_agg_size, req_agg_wsize); 291 return min(max_agg_size, req_agg_wsize);
292} 292}
293 293
294/* Block Ack - Rx side (recipient */ 294/* Block Ack - Rx side (recipient) */
295int wil_addba_rx_request(struct wil6210_priv *wil, u8 cidxtid, 295int wil_addba_rx_request(struct wil6210_priv *wil, u8 cidxtid,
296 u8 dialog_token, __le16 ba_param_set, 296 u8 dialog_token, __le16 ba_param_set,
297 __le16 ba_timeout, __le16 ba_seq_ctrl) 297 __le16 ba_timeout, __le16 ba_seq_ctrl)
298{
299 struct wil_back_rx *req = kzalloc(sizeof(*req), GFP_KERNEL);
300
301 if (!req)
302 return -ENOMEM;
303
304 req->cidxtid = cidxtid;
305 req->dialog_token = dialog_token;
306 req->ba_param_set = le16_to_cpu(ba_param_set);
307 req->ba_timeout = le16_to_cpu(ba_timeout);
308 req->ba_seq_ctrl = le16_to_cpu(ba_seq_ctrl);
309
310 mutex_lock(&wil->back_rx_mutex);
311 list_add_tail(&req->list, &wil->back_rx_pending);
312 mutex_unlock(&wil->back_rx_mutex);
313
314 queue_work(wil->wq_service, &wil->back_rx_worker);
315
316 return 0;
317}
318
319static void wil_back_rx_handle(struct wil6210_priv *wil,
320 struct wil_back_rx *req)
321__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) 298__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
322{ 299{
300 u16 param_set = le16_to_cpu(ba_param_set);
301 u16 agg_timeout = le16_to_cpu(ba_timeout);
302 u16 seq_ctrl = le16_to_cpu(ba_seq_ctrl);
323 struct wil_sta_info *sta; 303 struct wil_sta_info *sta;
324 u8 cid, tid; 304 u8 cid, tid;
325 u16 agg_wsize = 0; 305 u16 agg_wsize = 0;
@@ -328,34 +308,35 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
328 * bits 2..5: TID 308 * bits 2..5: TID
329 * bits 6..15: buffer size 309 * bits 6..15: buffer size
330 */ 310 */
331 u16 req_agg_wsize = WIL_GET_BITS(req->ba_param_set, 6, 15); 311 u16 req_agg_wsize = WIL_GET_BITS(param_set, 6, 15);
332 bool agg_amsdu = !!(req->ba_param_set & BIT(0)); 312 bool agg_amsdu = !!(param_set & BIT(0));
333 int ba_policy = req->ba_param_set & BIT(1); 313 int ba_policy = param_set & BIT(1);
334 u16 agg_timeout = req->ba_timeout;
335 u16 status = WLAN_STATUS_SUCCESS; 314 u16 status = WLAN_STATUS_SUCCESS;
336 u16 ssn = req->ba_seq_ctrl >> 4; 315 u16 ssn = seq_ctrl >> 4;
337 struct wil_tid_ampdu_rx *r; 316 struct wil_tid_ampdu_rx *r;
338 int rc; 317 int rc = 0;
339 318
340 might_sleep(); 319 might_sleep();
341 parse_cidxtid(req->cidxtid, &cid, &tid); 320 parse_cidxtid(cidxtid, &cid, &tid);
342 321
343 /* sanity checks */ 322 /* sanity checks */
344 if (cid >= WIL6210_MAX_CID) { 323 if (cid >= WIL6210_MAX_CID) {
345 wil_err(wil, "BACK: invalid CID %d\n", cid); 324 wil_err(wil, "BACK: invalid CID %d\n", cid);
346 return; 325 rc = -EINVAL;
326 goto out;
347 } 327 }
348 328
349 sta = &wil->sta[cid]; 329 sta = &wil->sta[cid];
350 if (sta->status != wil_sta_connected) { 330 if (sta->status != wil_sta_connected) {
351 wil_err(wil, "BACK: CID %d not connected\n", cid); 331 wil_err(wil, "BACK: CID %d not connected\n", cid);
352 return; 332 rc = -EINVAL;
333 goto out;
353 } 334 }
354 335
355 wil_dbg_wmi(wil, 336 wil_dbg_wmi(wil,
356 "ADDBA request for CID %d %pM TID %d size %d timeout %d AMSDU%s policy %d token %d SSN 0x%03x\n", 337 "ADDBA request for CID %d %pM TID %d size %d timeout %d AMSDU%s policy %d token %d SSN 0x%03x\n",
357 cid, sta->addr, tid, req_agg_wsize, req->ba_timeout, 338 cid, sta->addr, tid, req_agg_wsize, agg_timeout,
358 agg_amsdu ? "+" : "-", !!ba_policy, req->dialog_token, ssn); 339 agg_amsdu ? "+" : "-", !!ba_policy, dialog_token, ssn);
359 340
360 /* apply policies */ 341 /* apply policies */
361 if (ba_policy) { 342 if (ba_policy) {
@@ -365,10 +346,13 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
365 if (status == WLAN_STATUS_SUCCESS) 346 if (status == WLAN_STATUS_SUCCESS)
366 agg_wsize = wil_agg_size(wil, req_agg_wsize); 347 agg_wsize = wil_agg_size(wil, req_agg_wsize);
367 348
368 rc = wmi_addba_rx_resp(wil, cid, tid, req->dialog_token, status, 349 rc = wmi_addba_rx_resp(wil, cid, tid, dialog_token, status,
369 agg_amsdu, agg_wsize, agg_timeout); 350 agg_amsdu, agg_wsize, agg_timeout);
370 if (rc || (status != WLAN_STATUS_SUCCESS)) 351 if (rc || (status != WLAN_STATUS_SUCCESS)) {
371 return; 352 wil_err(wil, "%s: do not apply ba, rc(%d), status(%d)\n",
353 __func__, rc, status);
354 goto out;
355 }
372 356
373 /* apply */ 357 /* apply */
374 r = wil_tid_ampdu_rx_alloc(wil, agg_wsize, ssn); 358 r = wil_tid_ampdu_rx_alloc(wil, agg_wsize, ssn);
@@ -376,143 +360,37 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
376 wil_tid_ampdu_rx_free(wil, sta->tid_rx[tid]); 360 wil_tid_ampdu_rx_free(wil, sta->tid_rx[tid]);
377 sta->tid_rx[tid] = r; 361 sta->tid_rx[tid] = r;
378 spin_unlock_bh(&sta->tid_rx_lock); 362 spin_unlock_bh(&sta->tid_rx_lock);
379}
380
381void wil_back_rx_flush(struct wil6210_priv *wil)
382{
383 struct wil_back_rx *evt, *t;
384 363
385 wil_dbg_misc(wil, "%s()\n", __func__); 364out:
386 365 return rc;
387 mutex_lock(&wil->back_rx_mutex);
388
389 list_for_each_entry_safe(evt, t, &wil->back_rx_pending, list) {
390 list_del(&evt->list);
391 kfree(evt);
392 }
393
394 mutex_unlock(&wil->back_rx_mutex);
395}
396
397/* Retrieve next ADDBA request from the pending list */
398static struct list_head *next_back_rx(struct wil6210_priv *wil)
399{
400 struct list_head *ret = NULL;
401
402 mutex_lock(&wil->back_rx_mutex);
403
404 if (!list_empty(&wil->back_rx_pending)) {
405 ret = wil->back_rx_pending.next;
406 list_del(ret);
407 }
408
409 mutex_unlock(&wil->back_rx_mutex);
410
411 return ret;
412}
413
414void wil_back_rx_worker(struct work_struct *work)
415{
416 struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
417 back_rx_worker);
418 struct wil_back_rx *evt;
419 struct list_head *lh;
420
421 while ((lh = next_back_rx(wil)) != NULL) {
422 evt = list_entry(lh, struct wil_back_rx, list);
423
424 wil_back_rx_handle(wil, evt);
425 kfree(evt);
426 }
427} 366}
428 367
429/* BACK - Tx (originator) side */ 368/* BACK - Tx side (originator) */
430static void wil_back_tx_handle(struct wil6210_priv *wil, 369int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize)
431 struct wil_back_tx *req)
432{ 370{
433 struct vring_tx_data *txdata = &wil->vring_tx_data[req->ringid]; 371 u8 agg_wsize = wil_agg_size(wil, wsize);
434 int rc; 372 u16 agg_timeout = 0;
373 struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
374 int rc = 0;
435 375
436 if (txdata->addba_in_progress) { 376 if (txdata->addba_in_progress) {
437 wil_dbg_misc(wil, "ADDBA for vring[%d] already in progress\n", 377 wil_dbg_misc(wil, "ADDBA for vring[%d] already in progress\n",
438 req->ringid); 378 ringid);
439 return; 379 goto out;
440 } 380 }
441 if (txdata->agg_wsize) { 381 if (txdata->agg_wsize) {
442 wil_dbg_misc(wil, 382 wil_dbg_misc(wil,
443 "ADDBA for vring[%d] already established wsize %d\n", 383 "ADDBA for vring[%d] already done for wsize %d\n",
444 req->ringid, txdata->agg_wsize); 384 ringid, txdata->agg_wsize);
445 return; 385 goto out;
446 } 386 }
447 txdata->addba_in_progress = true; 387 txdata->addba_in_progress = true;
448 rc = wmi_addba(wil, req->ringid, req->agg_wsize, req->agg_timeout); 388 rc = wmi_addba(wil, ringid, agg_wsize, agg_timeout);
449 if (rc) 389 if (rc) {
390 wil_err(wil, "%s: wmi_addba failed, rc (%d)", __func__, rc);
450 txdata->addba_in_progress = false; 391 txdata->addba_in_progress = false;
451}
452
453static struct list_head *next_back_tx(struct wil6210_priv *wil)
454{
455 struct list_head *ret = NULL;
456
457 mutex_lock(&wil->back_tx_mutex);
458
459 if (!list_empty(&wil->back_tx_pending)) {
460 ret = wil->back_tx_pending.next;
461 list_del(ret);
462 }
463
464 mutex_unlock(&wil->back_tx_mutex);
465
466 return ret;
467}
468
469void wil_back_tx_worker(struct work_struct *work)
470{
471 struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
472 back_tx_worker);
473 struct wil_back_tx *evt;
474 struct list_head *lh;
475
476 while ((lh = next_back_tx(wil)) != NULL) {
477 evt = list_entry(lh, struct wil_back_tx, list);
478
479 wil_back_tx_handle(wil, evt);
480 kfree(evt);
481 } 392 }
482}
483
484void wil_back_tx_flush(struct wil6210_priv *wil)
485{
486 struct wil_back_tx *evt, *t;
487
488 wil_dbg_misc(wil, "%s()\n", __func__);
489
490 mutex_lock(&wil->back_tx_mutex);
491
492 list_for_each_entry_safe(evt, t, &wil->back_tx_pending, list) {
493 list_del(&evt->list);
494 kfree(evt);
495 }
496
497 mutex_unlock(&wil->back_tx_mutex);
498}
499
500int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize)
501{
502 struct wil_back_tx *req = kzalloc(sizeof(*req), GFP_KERNEL);
503
504 if (!req)
505 return -ENOMEM;
506 393
507 req->ringid = ringid; 394out:
508 req->agg_wsize = wil_agg_size(wil, wsize); 395 return rc;
509 req->agg_timeout = 0;
510
511 mutex_lock(&wil->back_tx_mutex);
512 list_add_tail(&req->list, &wil->back_tx_pending);
513 mutex_unlock(&wil->back_tx_mutex);
514
515 queue_work(wil->wq_service, &wil->back_tx_worker);
516
517 return 0;
518} 396}
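With the pending-list plumbing gone, wil_addba_rx_request() now unpacks the ADDBA request inline: per the comment above, the BA parameter set carries A-MSDU support in bit 0, the BA policy in bit 1, the TID in bits 2..5 and the buffer size in bits 6..15 (the driver takes CID and TID from the packed cidxtid argument instead), and the SSN is the sequence-control field shifted right by 4. A small standalone sketch of that unpacking follows (not driver code); GET_BITS is assumed to behave like the driver's WIL_GET_BITS(), extracting bits b0..b1 inclusive, and the input values are made up.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define GET_BITS(x, b0, b1) \
	(((x) >> (b0)) & ((1U << ((b1) - (b0) + 1)) - 1))

int main(void)
{
	uint16_t ba_param_set = 0x1003;	/* example: A-MSDU + immediate BA, wsize 64 */
	uint16_t ba_seq_ctrl  = 0x0ab0;	/* example sequence control */

	bool     amsdu  = ba_param_set & (1 << 0);		/* bit 0: A-MSDU support */
	int      policy = ba_param_set & (1 << 1);		/* bit 1: BA policy */
	uint16_t tid    = GET_BITS(ba_param_set, 2, 5);		/* bits 2..5: TID */
	uint16_t wsize  = GET_BITS(ba_param_set, 6, 15);	/* bits 6..15: buffer size */
	uint16_t ssn    = ba_seq_ctrl >> 4;			/* starting sequence number */

	printf("amsdu=%d policy=%d tid=%u wsize=%u ssn=0x%03x\n",
	       amsdu, !!policy, tid, wsize, ssn);
	return 0;
}

The function also returns an error now instead of silently dropping invalid requests, which matches the rc/goto out changes in the hunk above.
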
diff --git a/drivers/net/wireless/ath/wil6210/trace.h b/drivers/net/wireless/ath/wil6210/trace.h
index e59239d22b94..c4db2a9d9f7f 100644
--- a/drivers/net/wireless/ath/wil6210/trace.h
+++ b/drivers/net/wireless/ath/wil6210/trace.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013 Qualcomm Atheros, Inc. 2 * Copyright (c) 2013-2016 Qualcomm Atheros, Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -37,39 +37,40 @@ static inline void trace_ ## name(proto) {}
37#endif /* !CONFIG_WIL6210_TRACING || defined(__CHECKER__) */ 37#endif /* !CONFIG_WIL6210_TRACING || defined(__CHECKER__) */
38 38
39DECLARE_EVENT_CLASS(wil6210_wmi, 39DECLARE_EVENT_CLASS(wil6210_wmi,
40 TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len), 40 TP_PROTO(struct wmi_cmd_hdr *wmi, void *buf, u16 buf_len),
41 41
42 TP_ARGS(wmi, buf, buf_len), 42 TP_ARGS(wmi, buf, buf_len),
43 43
44 TP_STRUCT__entry( 44 TP_STRUCT__entry(
45 __field(u8, mid) 45 __field(u8, mid)
46 __field(u16, id) 46 __field(u16, command_id)
47 __field(u32, timestamp) 47 __field(u32, fw_timestamp)
48 __field(u16, buf_len) 48 __field(u16, buf_len)
49 __dynamic_array(u8, buf, buf_len) 49 __dynamic_array(u8, buf, buf_len)
50 ), 50 ),
51 51
52 TP_fast_assign( 52 TP_fast_assign(
53 __entry->mid = wmi->mid; 53 __entry->mid = wmi->mid;
54 __entry->id = le16_to_cpu(wmi->id); 54 __entry->command_id = le16_to_cpu(wmi->command_id);
55 __entry->timestamp = le32_to_cpu(wmi->timestamp); 55 __entry->fw_timestamp = le32_to_cpu(wmi->fw_timestamp);
56 __entry->buf_len = buf_len; 56 __entry->buf_len = buf_len;
57 memcpy(__get_dynamic_array(buf), buf, buf_len); 57 memcpy(__get_dynamic_array(buf), buf, buf_len);
58 ), 58 ),
59 59
60 TP_printk( 60 TP_printk(
61 "MID %d id 0x%04x len %d timestamp %d", 61 "MID %d id 0x%04x len %d timestamp %d",
62 __entry->mid, __entry->id, __entry->buf_len, __entry->timestamp 62 __entry->mid, __entry->command_id, __entry->buf_len,
63 __entry->fw_timestamp
63 ) 64 )
64); 65);
65 66
66DEFINE_EVENT(wil6210_wmi, wil6210_wmi_cmd, 67DEFINE_EVENT(wil6210_wmi, wil6210_wmi_cmd,
67 TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len), 68 TP_PROTO(struct wmi_cmd_hdr *wmi, void *buf, u16 buf_len),
68 TP_ARGS(wmi, buf, buf_len) 69 TP_ARGS(wmi, buf, buf_len)
69); 70);
70 71
71DEFINE_EVENT(wil6210_wmi, wil6210_wmi_event, 72DEFINE_EVENT(wil6210_wmi, wil6210_wmi_event,
72 TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len), 73 TP_PROTO(struct wmi_cmd_hdr *wmi, void *buf, u16 buf_len),
73 TP_ARGS(wmi, buf, buf_len) 74 TP_ARGS(wmi, buf, buf_len)
74); 75);
75 76
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 6af20903cf89..f260b232fd57 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -549,6 +549,60 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count)
549 return rc; 549 return rc;
550} 550}
551 551
552/**
553 * reverse_memcmp - Compare two areas of memory, in reverse order
554 * @cs: One area of memory
555 * @ct: Another area of memory
556 * @count: The size of the area.
557 *
 558 * Copied from the original memcmp() (see lib/string.c)
 559 * with minimal modifications
560 */
561static int reverse_memcmp(const void *cs, const void *ct, size_t count)
562{
563 const unsigned char *su1, *su2;
564 int res = 0;
565
566 for (su1 = cs + count - 1, su2 = ct + count - 1; count > 0;
567 --su1, --su2, count--) {
568 res = *su1 - *su2;
569 if (res)
570 break;
571 }
572 return res;
573}
574
575static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb)
576{
577 struct vring_rx_desc *d = wil_skb_rxdesc(skb);
578 int cid = wil_rxdesc_cid(d);
579 int tid = wil_rxdesc_tid(d);
580 int key_id = wil_rxdesc_key_id(d);
581 int mc = wil_rxdesc_mcast(d);
582 struct wil_sta_info *s = &wil->sta[cid];
583 struct wil_tid_crypto_rx *c = mc ? &s->group_crypto_rx :
584 &s->tid_crypto_rx[tid];
585 struct wil_tid_crypto_rx_single *cc = &c->key_id[key_id];
586 const u8 *pn = (u8 *)&d->mac.pn_15_0;
587
588 if (!cc->key_set) {
589 wil_err_ratelimited(wil,
590 "Key missing. CID %d TID %d MCast %d KEY_ID %d\n",
591 cid, tid, mc, key_id);
592 return -EINVAL;
593 }
594
595 if (reverse_memcmp(pn, cc->pn, IEEE80211_GCMP_PN_LEN) <= 0) {
596 wil_err_ratelimited(wil,
597 "Replay attack. CID %d TID %d MCast %d KEY_ID %d PN %6phN last %6phN\n",
598 cid, tid, mc, key_id, pn, cc->pn);
599 return -EINVAL;
600 }
601 memcpy(cc->pn, pn, IEEE80211_GCMP_PN_LEN);
602
603 return 0;
604}
605
552/* 606/*
553 * Pass Rx packet to the netif. Update statistics. 607 * Pass Rx packet to the netif. Update statistics.
554 * Called in softirq context (NAPI poll). 608 * Called in softirq context (NAPI poll).
@@ -561,6 +615,7 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
561 unsigned int len = skb->len; 615 unsigned int len = skb->len;
562 struct vring_rx_desc *d = wil_skb_rxdesc(skb); 616 struct vring_rx_desc *d = wil_skb_rxdesc(skb);
563 int cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */ 617 int cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */
618 int security = wil_rxdesc_security(d);
564 struct ethhdr *eth = (void *)skb->data; 619 struct ethhdr *eth = (void *)skb->data;
565 /* here looking for DA, not A1, thus Rxdesc's 'mcast' indication 620 /* here looking for DA, not A1, thus Rxdesc's 'mcast' indication
566 * is not suitable, need to look at data 621 * is not suitable, need to look at data
@@ -586,6 +641,13 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
586 641
587 skb_orphan(skb); 642 skb_orphan(skb);
588 643
644 if (security && (wil_rx_crypto_check(wil, skb) != 0)) {
645 rc = GRO_DROP;
646 dev_kfree_skb(skb);
647 stats->rx_replay++;
648 goto stats;
649 }
650
589 if (wdev->iftype == NL80211_IFTYPE_AP && !wil->ap_isolate) { 651 if (wdev->iftype == NL80211_IFTYPE_AP && !wil->ap_isolate) {
590 if (mcast) { 652 if (mcast) {
591 /* send multicast frames both to higher layers in 653 /* send multicast frames both to higher layers in
@@ -627,6 +689,7 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
627 wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n", 689 wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
628 len, gro_res_str[rc]); 690 len, gro_res_str[rc]);
629 } 691 }
692stats:
630 /* statistics. rc set to GRO_NORMAL for AP bridging */ 693 /* statistics. rc set to GRO_NORMAL for AP bridging */
631 if (unlikely(rc == GRO_DROP)) { 694 if (unlikely(rc == GRO_DROP)) {
632 ndev->stats.rx_dropped++; 695 ndev->stats.rx_dropped++;
@@ -757,7 +820,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
757 }, 820 },
758 }; 821 };
759 struct { 822 struct {
760 struct wil6210_mbox_hdr_wmi wmi; 823 struct wmi_cmd_hdr wmi;
761 struct wmi_vring_cfg_done_event cmd; 824 struct wmi_vring_cfg_done_event cmd;
762 } __packed reply; 825 } __packed reply;
763 struct vring *vring = &wil->vring_tx[id]; 826 struct vring *vring = &wil->vring_tx[id];
@@ -834,7 +897,7 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
834 }, 897 },
835 }; 898 };
836 struct { 899 struct {
837 struct wil6210_mbox_hdr_wmi wmi; 900 struct wmi_cmd_hdr wmi;
838 struct wmi_vring_cfg_done_event cmd; 901 struct wmi_vring_cfg_done_event cmd;
839 } __packed reply; 902 } __packed reply;
840 struct vring *vring = &wil->vring_tx[id]; 903 struct vring *vring = &wil->vring_tx[id];
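The Rx path now drops secured frames whose GCMP packet number does not advance: wil_rx_crypto_check() compares the 6-byte PN from the descriptor against the last PN stored for that CID/TID/key slot, using reverse_memcmp() because the byte layout puts the most significant byte last, and only a strictly larger PN is accepted and remembered; rejected frames bump the new rx_replay counter instead of reaching the stack. A standalone sketch of that rule, assuming the same byte order (not driver code):

#include <stdio.h>
#include <string.h>

#define PN_LEN 6

/* compare from the last byte (most significant in this layout) backwards */
static int reverse_memcmp(const void *cs, const void *ct, size_t count)
{
	const unsigned char *su1 = (const unsigned char *)cs + count - 1;
	const unsigned char *su2 = (const unsigned char *)ct + count - 1;
	int res = 0;

	for (; count > 0; --su1, --su2, count--) {
		res = *su1 - *su2;
		if (res)
			break;
	}
	return res;
}

static int check_and_update(unsigned char last_pn[PN_LEN],
			    const unsigned char pn[PN_LEN])
{
	if (reverse_memcmp(pn, last_pn, PN_LEN) <= 0)
		return -1;			/* replay: PN did not advance */
	memcpy(last_pn, pn, PN_LEN);		/* accept and remember */
	return 0;
}

int main(void)
{
	unsigned char last[PN_LEN] = { 0x05, 0x00, 0x00, 0x00, 0x00, 0x00 };
	unsigned char next[PN_LEN] = { 0x06, 0x00, 0x00, 0x00, 0x00, 0x00 };

	printf("first:  %s\n", check_and_update(last, next) ? "drop" : "accept");
	printf("replay: %s\n", check_and_update(last, next) ? "drop" : "accept");
	return 0;
}
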
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
index ee7c7b4b9a17..fcdffaa8251b 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.h
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2012-2014 Qualcomm Atheros, Inc. 2 * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -480,6 +480,16 @@ static inline int wil_rxdesc_ext_subtype(struct vring_rx_desc *d)
480 return WIL_GET_BITS(d->mac.d0, 28, 31); 480 return WIL_GET_BITS(d->mac.d0, 28, 31);
481} 481}
482 482
483static inline int wil_rxdesc_key_id(struct vring_rx_desc *d)
484{
485 return WIL_GET_BITS(d->mac.d1, 4, 5);
486}
487
488static inline int wil_rxdesc_security(struct vring_rx_desc *d)
489{
490 return WIL_GET_BITS(d->mac.d1, 7, 7);
491}
492
483static inline int wil_rxdesc_ds_bits(struct vring_rx_desc *d) 493static inline int wil_rxdesc_ds_bits(struct vring_rx_desc *d)
484{ 494{
485 return WIL_GET_BITS(d->mac.d1, 8, 9); 495 return WIL_GET_BITS(d->mac.d1, 8, 9);
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 8427d68b6fa8..4d699ea46373 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -22,6 +22,7 @@
22#include <net/cfg80211.h> 22#include <net/cfg80211.h>
23#include <linux/timex.h> 23#include <linux/timex.h>
24#include <linux/types.h> 24#include <linux/types.h>
25#include "wmi.h"
25#include "wil_platform.h" 26#include "wil_platform.h"
26 27
27extern bool no_fw_recovery; 28extern bool no_fw_recovery;
@@ -131,6 +132,7 @@ struct RGF_ICR {
131/* registers - FW addresses */ 132/* registers - FW addresses */
132#define RGF_USER_USAGE_1 (0x880004) 133#define RGF_USER_USAGE_1 (0x880004)
133#define RGF_USER_USAGE_6 (0x880018) 134#define RGF_USER_USAGE_6 (0x880018)
135 #define BIT_USER_OOB_MODE BIT(31)
134#define RGF_USER_HW_MACHINE_STATE (0x8801dc) 136#define RGF_USER_HW_MACHINE_STATE (0x8801dc)
135 #define HW_MACHINE_BOOT_DONE (0x3fffffd) 137 #define HW_MACHINE_BOOT_DONE (0x3fffffd)
136#define RGF_USER_USER_CPU_0 (0x8801e0) 138#define RGF_USER_USER_CPU_0 (0x8801e0)
@@ -334,29 +336,11 @@ struct wil6210_mbox_hdr {
334/* max. value for wil6210_mbox_hdr.len */ 336/* max. value for wil6210_mbox_hdr.len */
335#define MAX_MBOXITEM_SIZE (240) 337#define MAX_MBOXITEM_SIZE (240)
336 338
337/**
338 * struct wil6210_mbox_hdr_wmi - WMI header
339 *
340 * @mid: MAC ID
341 * 00 - default, created by FW
342 * 01..0f - WiFi ports, driver to create
343 * 10..fe - debug
344 * ff - broadcast
345 * @id: command/event ID
346 * @timestamp: FW fills for events, free-running msec timer
347 */
348struct wil6210_mbox_hdr_wmi {
349 u8 mid;
350 u8 reserved;
351 __le16 id;
352 __le32 timestamp;
353} __packed;
354
355struct pending_wmi_event { 339struct pending_wmi_event {
356 struct list_head list; 340 struct list_head list;
357 struct { 341 struct {
358 struct wil6210_mbox_hdr hdr; 342 struct wil6210_mbox_hdr hdr;
359 struct wil6210_mbox_hdr_wmi wmi; 343 struct wmi_cmd_hdr wmi;
360 u8 data[0]; 344 u8 data[0];
361 } __packed event; 345 } __packed event;
362}; 346};
@@ -455,6 +439,29 @@ struct wil_tid_ampdu_rx {
455 bool first_time; /* is it 1-st time this buffer used? */ 439 bool first_time; /* is it 1-st time this buffer used? */
456}; 440};
457 441
442/**
443 * struct wil_tid_crypto_rx_single - TID crypto information (Rx).
444 *
445 * @pn: GCMP PN for the session
446 * @key_set: valid key present
447 */
448struct wil_tid_crypto_rx_single {
449 u8 pn[IEEE80211_GCMP_PN_LEN];
450 bool key_set;
451};
452
453struct wil_tid_crypto_rx {
454 struct wil_tid_crypto_rx_single key_id[4];
455};
456
457struct wil_p2p_info {
458 struct ieee80211_channel listen_chan;
459 u8 discovery_started;
460 u64 cookie;
461 struct timer_list discovery_timer; /* listen/search duration */
462 struct work_struct discovery_expired_work; /* listen/search expire */
463};
464
458enum wil_sta_status { 465enum wil_sta_status {
459 wil_sta_unused = 0, 466 wil_sta_unused = 0,
460 wil_sta_conn_pending = 1, 467 wil_sta_conn_pending = 1,
@@ -474,6 +481,7 @@ struct wil_net_stats {
474 unsigned long rx_non_data_frame; 481 unsigned long rx_non_data_frame;
475 unsigned long rx_short_frame; 482 unsigned long rx_short_frame;
476 unsigned long rx_large_frame; 483 unsigned long rx_large_frame;
484 unsigned long rx_replay;
477 u16 last_mcs_rx; 485 u16 last_mcs_rx;
478 u64 rx_per_mcs[WIL_MCS_MAX + 1]; 486 u64 rx_per_mcs[WIL_MCS_MAX + 1];
479}; 487};
@@ -495,6 +503,8 @@ struct wil_sta_info {
495 spinlock_t tid_rx_lock; /* guarding tid_rx array */ 503 spinlock_t tid_rx_lock; /* guarding tid_rx array */
496 unsigned long tid_rx_timer_expired[BITS_TO_LONGS(WIL_STA_TID_NUM)]; 504 unsigned long tid_rx_timer_expired[BITS_TO_LONGS(WIL_STA_TID_NUM)];
497 unsigned long tid_rx_stop_requested[BITS_TO_LONGS(WIL_STA_TID_NUM)]; 505 unsigned long tid_rx_stop_requested[BITS_TO_LONGS(WIL_STA_TID_NUM)];
506 struct wil_tid_crypto_rx tid_crypto_rx[WIL_STA_TID_NUM];
507 struct wil_tid_crypto_rx group_crypto_rx;
498}; 508};
499 509
500enum { 510enum {
@@ -507,24 +517,6 @@ enum {
507 hw_capability_last 517 hw_capability_last
508}; 518};
509 519
510struct wil_back_rx {
511 struct list_head list;
512 /* request params, converted to CPU byte order - what we asked for */
513 u8 cidxtid;
514 u8 dialog_token;
515 u16 ba_param_set;
516 u16 ba_timeout;
517 u16 ba_seq_ctrl;
518};
519
520struct wil_back_tx {
521 struct list_head list;
522 /* request params, converted to CPU byte order - what we asked for */
523 u8 ringid;
524 u8 agg_wsize;
525 u16 agg_timeout;
526};
527
528struct wil_probe_client_req { 520struct wil_probe_client_req {
529 struct list_head list; 521 struct list_head list;
530 u64 cookie; 522 u64 cookie;
@@ -595,13 +587,6 @@ struct wil6210_priv {
595 spinlock_t wmi_ev_lock; 587 spinlock_t wmi_ev_lock;
596 struct napi_struct napi_rx; 588 struct napi_struct napi_rx;
597 struct napi_struct napi_tx; 589 struct napi_struct napi_tx;
598 /* BACK */
599 struct list_head back_rx_pending;
600 struct mutex back_rx_mutex; /* protect @back_rx_pending */
601 struct work_struct back_rx_worker;
602 struct list_head back_tx_pending;
603 struct mutex back_tx_mutex; /* protect @back_tx_pending */
604 struct work_struct back_tx_worker;
605 /* keep alive */ 590 /* keep alive */
606 struct list_head probe_client_pending; 591 struct list_head probe_client_pending;
607 struct mutex probe_client_mutex; /* protect @probe_client_pending */ 592 struct mutex probe_client_mutex; /* protect @probe_client_pending */
@@ -622,11 +607,21 @@ struct wil6210_priv {
622 /* debugfs */ 607 /* debugfs */
623 struct dentry *debug; 608 struct dentry *debug;
624 struct debugfs_blob_wrapper blobs[ARRAY_SIZE(fw_mapping)]; 609 struct debugfs_blob_wrapper blobs[ARRAY_SIZE(fw_mapping)];
610 u8 discovery_mode;
625 611
626 void *platform_handle; 612 void *platform_handle;
627 struct wil_platform_ops platform_ops; 613 struct wil_platform_ops platform_ops;
628 614
629 struct pmc_ctx pmc; 615 struct pmc_ctx pmc;
616
617 bool pbss;
618
619 struct wil_p2p_info p2p;
620
621 /* P2P_DEVICE vif */
622 struct wireless_dev *p2p_wdev;
623 struct mutex p2p_wdev_mutex; /* protect @p2p_wdev */
624 struct wireless_dev *radio_wdev;
630}; 625};
631 626
632#define wil_to_wiphy(i) (i->wdev->wiphy) 627#define wil_to_wiphy(i) (i->wdev->wiphy)
@@ -722,6 +717,7 @@ void wil_priv_deinit(struct wil6210_priv *wil);
722int wil_reset(struct wil6210_priv *wil, bool no_fw); 717int wil_reset(struct wil6210_priv *wil, bool no_fw);
723void wil_fw_error_recovery(struct wil6210_priv *wil); 718void wil_fw_error_recovery(struct wil6210_priv *wil);
724void wil_set_recovery_state(struct wil6210_priv *wil, int state); 719void wil_set_recovery_state(struct wil6210_priv *wil, int state);
720bool wil_is_recovery_blocked(struct wil6210_priv *wil);
725int wil_up(struct wil6210_priv *wil); 721int wil_up(struct wil6210_priv *wil);
726int __wil_up(struct wil6210_priv *wil); 722int __wil_up(struct wil6210_priv *wil);
727int wil_down(struct wil6210_priv *wil); 723int wil_down(struct wil6210_priv *wil);
@@ -752,7 +748,6 @@ int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
752int wmi_echo(struct wil6210_priv *wil); 748int wmi_echo(struct wil6210_priv *wil);
753int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie); 749int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie);
754int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring); 750int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring);
755int wmi_p2p_cfg(struct wil6210_priv *wil, int channel);
756int wmi_rxon(struct wil6210_priv *wil, bool on); 751int wmi_rxon(struct wil6210_priv *wil, bool on);
757int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r); 752int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r);
758int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason, 753int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason,
@@ -765,11 +760,7 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil, u8 cid, u8 tid, u8 token,
765int wil_addba_rx_request(struct wil6210_priv *wil, u8 cidxtid, 760int wil_addba_rx_request(struct wil6210_priv *wil, u8 cidxtid,
766 u8 dialog_token, __le16 ba_param_set, 761 u8 dialog_token, __le16 ba_param_set,
767 __le16 ba_timeout, __le16 ba_seq_ctrl); 762 __le16 ba_timeout, __le16 ba_seq_ctrl);
768void wil_back_rx_worker(struct work_struct *work);
769void wil_back_rx_flush(struct wil6210_priv *wil);
770int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize); 763int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize);
771void wil_back_tx_worker(struct work_struct *work);
772void wil_back_tx_flush(struct wil6210_priv *wil);
773 764
774void wil6210_clear_irq(struct wil6210_priv *wil); 765void wil6210_clear_irq(struct wil6210_priv *wil);
775int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi); 766int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi);
@@ -779,6 +770,24 @@ void wil_unmask_irq(struct wil6210_priv *wil);
779void wil_configure_interrupt_moderation(struct wil6210_priv *wil); 770void wil_configure_interrupt_moderation(struct wil6210_priv *wil);
780void wil_disable_irq(struct wil6210_priv *wil); 771void wil_disable_irq(struct wil6210_priv *wil);
781void wil_enable_irq(struct wil6210_priv *wil); 772void wil_enable_irq(struct wil6210_priv *wil);
773
774/* P2P */
775void wil_p2p_discovery_timer_fn(ulong x);
776int wil_p2p_search(struct wil6210_priv *wil,
777 struct cfg80211_scan_request *request);
778int wil_p2p_listen(struct wil6210_priv *wil, unsigned int duration,
779 struct ieee80211_channel *chan, u64 *cookie);
780u8 wil_p2p_stop_discovery(struct wil6210_priv *wil);
781int wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie);
782void wil_p2p_listen_expired(struct work_struct *work);
783void wil_p2p_search_expired(struct work_struct *work);
784
785/* WMI for P2P */
786int wmi_p2p_cfg(struct wil6210_priv *wil, int channel, int bi);
787int wmi_start_listen(struct wil6210_priv *wil);
788int wmi_start_search(struct wil6210_priv *wil);
789int wmi_stop_discovery(struct wil6210_priv *wil);
790
782int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, 791int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
783 struct cfg80211_mgmt_tx_params *params, 792 struct cfg80211_mgmt_tx_params *params,
784 u64 *cookie); 793 u64 *cookie);
@@ -790,10 +799,11 @@ int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid,
790 799
791struct wireless_dev *wil_cfg80211_init(struct device *dev); 800struct wireless_dev *wil_cfg80211_init(struct device *dev);
792void wil_wdev_free(struct wil6210_priv *wil); 801void wil_wdev_free(struct wil6210_priv *wil);
802void wil_p2p_wdev_free(struct wil6210_priv *wil);
793 803
794int wmi_set_mac_address(struct wil6210_priv *wil, void *addr); 804int wmi_set_mac_address(struct wil6210_priv *wil, void *addr);
795int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, 805int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype,
796 u8 chan, u8 hidden_ssid); 806 u8 chan, u8 hidden_ssid, u8 is_go);
797int wmi_pcp_stop(struct wil6210_priv *wil); 807int wmi_pcp_stop(struct wil6210_priv *wil);
798void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid, 808void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
799 u16 reason_code, bool from_event); 809 u16 reason_code, bool from_event);
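The per-station crypto state added above keeps one wil_tid_crypto_rx context per TID plus a group context, each holding four key slots with the last PN and a key_set flag. Below is a standalone sketch of how a slot is selected on Rx (multicast frames use the group context, unicast frames the per-TID one, and the descriptor's key_id picks the slot); sizes mirror the diff, and WIL_STA_TID_NUM is assumed to be 16 here.

#include <stdbool.h>
#include <stdio.h>

#define GCMP_PN_LEN	6
#define TID_NUM		16	/* stand-in for WIL_STA_TID_NUM */

struct tid_crypto_rx_single {
	unsigned char pn[GCMP_PN_LEN];
	bool key_set;
};

struct tid_crypto_rx {
	struct tid_crypto_rx_single key_id[4];
};

struct sta_info {
	struct tid_crypto_rx tid_crypto_rx[TID_NUM];
	struct tid_crypto_rx group_crypto_rx;
};

static struct tid_crypto_rx_single *
crypto_slot(struct sta_info *s, bool mcast, int tid, int key_id)
{
	struct tid_crypto_rx *c = mcast ? &s->group_crypto_rx
					: &s->tid_crypto_rx[tid];

	return &c->key_id[key_id];
}

int main(void)
{
	static struct sta_info sta;	/* zeroed: no keys installed yet */
	struct tid_crypto_rx_single *cc = crypto_slot(&sta, false, 0, 1);

	printf("key_set=%d\n", cc->key_set);	/* 0 until a key is installed */
	return 0;
}
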
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.h b/drivers/net/wireless/ath/wil6210/wil_platform.h
index 9a949d910343..33d4a34b3b1c 100644
--- a/drivers/net/wireless/ath/wil6210/wil_platform.h
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.h
@@ -19,6 +19,12 @@
19 19
20struct device; 20struct device;
21 21
22enum wil_platform_event {
23 WIL_PLATFORM_EVT_FW_CRASH = 0,
24 WIL_PLATFORM_EVT_PRE_RESET = 1,
25 WIL_PLATFORM_EVT_FW_RDY = 2,
26};
27
22/** 28/**
23 * struct wil_platform_ops - wil platform module calls from this 29 * struct wil_platform_ops - wil platform module calls from this
24 * driver to platform driver 30 * driver to platform driver
@@ -28,7 +34,7 @@ struct wil_platform_ops {
28 int (*suspend)(void *handle); 34 int (*suspend)(void *handle);
29 int (*resume)(void *handle); 35 int (*resume)(void *handle);
30 void (*uninit)(void *handle); 36 void (*uninit)(void *handle);
31 int (*notify_crash)(void *handle); 37 int (*notify)(void *handle, enum wil_platform_event evt);
32}; 38};
33 39
34/** 40/**
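The platform interface above replaces the single notify_crash() hook with one generic notify(handle, event) callback plus an event enum, so firmware-crash, pre-reset and firmware-ready notifications all flow through the same entry point and future events need no new ops member. A standalone sketch of that pattern with an invented platform implementation (not driver code):

#include <stdio.h>

enum wil_platform_event {
	WIL_PLATFORM_EVT_FW_CRASH = 0,
	WIL_PLATFORM_EVT_PRE_RESET = 1,
	WIL_PLATFORM_EVT_FW_RDY = 2,
};

struct wil_platform_ops {
	int (*notify)(void *handle, enum wil_platform_event evt);
};

static int demo_notify(void *handle, enum wil_platform_event evt)
{
	(void)handle;
	switch (evt) {
	case WIL_PLATFORM_EVT_FW_CRASH:
		printf("platform: firmware crash reported\n");
		break;
	case WIL_PLATFORM_EVT_PRE_RESET:
		printf("platform: device about to be reset\n");
		break;
	case WIL_PLATFORM_EVT_FW_RDY:
		printf("platform: firmware ready\n");
		break;
	}
	return 0;
}

int main(void)
{
	struct wil_platform_ops ops = { .notify = demo_notify };

	/* driver side: call the hook only if the platform provides one */
	if (ops.notify)
		ops.notify(NULL, WIL_PLATFORM_EVT_FW_RDY);
	return 0;
}
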
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 493e721c4fa7..6ca28c3eff0a 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -176,7 +176,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
176{ 176{
177 struct { 177 struct {
178 struct wil6210_mbox_hdr hdr; 178 struct wil6210_mbox_hdr hdr;
179 struct wil6210_mbox_hdr_wmi wmi; 179 struct wmi_cmd_hdr wmi;
180 } __packed cmd = { 180 } __packed cmd = {
181 .hdr = { 181 .hdr = {
182 .type = WIL_MBOX_HDR_TYPE_WMI, 182 .type = WIL_MBOX_HDR_TYPE_WMI,
@@ -185,7 +185,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
185 }, 185 },
186 .wmi = { 186 .wmi = {
187 .mid = 0, 187 .mid = 0,
188 .id = cpu_to_le16(cmdid), 188 .command_id = cpu_to_le16(cmdid),
189 }, 189 },
190 }; 190 };
191 struct wil6210_mbox_ring *r = &wil->mbox_ctl.tx; 191 struct wil6210_mbox_ring *r = &wil->mbox_ctl.tx;
@@ -333,7 +333,7 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
333 } 333 }
334 334
335 ch_no = data->info.channel + 1; 335 ch_no = data->info.channel + 1;
336 freq = ieee80211_channel_to_frequency(ch_no, IEEE80211_BAND_60GHZ); 336 freq = ieee80211_channel_to_frequency(ch_no, NL80211_BAND_60GHZ);
337 channel = ieee80211_get_channel(wiphy, freq); 337 channel = ieee80211_get_channel(wiphy, freq);
338 signal = data->info.sqi; 338 signal = data->info.sqi;
339 d_status = le16_to_cpu(data->info.status); 339 d_status = le16_to_cpu(data->info.status);
@@ -368,6 +368,8 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
368 wil_hex_dump_wmi("IE ", DUMP_PREFIX_OFFSET, 16, 1, ie_buf, 368 wil_hex_dump_wmi("IE ", DUMP_PREFIX_OFFSET, 16, 1, ie_buf,
369 ie_len, true); 369 ie_len, true);
370 370
371 wil_dbg_wmi(wil, "Capability info : 0x%04x\n", cap);
372
371 bss = cfg80211_inform_bss_frame(wiphy, channel, rx_mgmt_frame, 373 bss = cfg80211_inform_bss_frame(wiphy, channel, rx_mgmt_frame,
372 d_len, signal, GFP_KERNEL); 374 d_len, signal, GFP_KERNEL);
373 if (bss) { 375 if (bss) {
@@ -378,8 +380,10 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
378 wil_err(wil, "cfg80211_inform_bss_frame() failed\n"); 380 wil_err(wil, "cfg80211_inform_bss_frame() failed\n");
379 } 381 }
380 } else { 382 } else {
381 cfg80211_rx_mgmt(wil->wdev, freq, signal, 383 mutex_lock(&wil->p2p_wdev_mutex);
384 cfg80211_rx_mgmt(wil->radio_wdev, freq, signal,
382 (void *)rx_mgmt_frame, d_len, 0); 385 (void *)rx_mgmt_frame, d_len, 0);
386 mutex_unlock(&wil->p2p_wdev_mutex);
383 } 387 }
384} 388}
385 389
@@ -406,7 +410,10 @@ static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
406 wil->scan_request, aborted); 410 wil->scan_request, aborted);
407 411
408 del_timer_sync(&wil->scan_timer); 412 del_timer_sync(&wil->scan_timer);
413 mutex_lock(&wil->p2p_wdev_mutex);
409 cfg80211_scan_done(wil->scan_request, aborted); 414 cfg80211_scan_done(wil->scan_request, aborted);
415 wil->radio_wdev = wil->wdev;
416 mutex_unlock(&wil->p2p_wdev_mutex);
410 wil->scan_request = NULL; 417 wil->scan_request = NULL;
411 } else { 418 } else {
412 wil_err(wil, "SCAN_COMPLETE while not scanning\n"); 419 wil_err(wil, "SCAN_COMPLETE while not scanning\n");
@@ -487,6 +494,14 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
487 return; 494 return;
488 } 495 }
489 del_timer_sync(&wil->connect_timer); 496 del_timer_sync(&wil->connect_timer);
497 } else if ((wdev->iftype == NL80211_IFTYPE_AP) ||
498 (wdev->iftype == NL80211_IFTYPE_P2P_GO)) {
499 if (wil->sta[evt->cid].status != wil_sta_unused) {
500 wil_err(wil, "%s: AP: Invalid status %d for CID %d\n",
501 __func__, wil->sta[evt->cid].status, evt->cid);
502 mutex_unlock(&wil->mutex);
503 return;
504 }
490 } 505 }
491 506
492 /* FIXME FW can transmit only ucast frames to peer */ 507 /* FIXME FW can transmit only ucast frames to peer */
@@ -648,7 +663,7 @@ static void wmi_evt_vring_en(struct wil6210_priv *wil, int id, void *d, int len)
648static void wmi_evt_ba_status(struct wil6210_priv *wil, int id, void *d, 663static void wmi_evt_ba_status(struct wil6210_priv *wil, int id, void *d,
649 int len) 664 int len)
650{ 665{
651 struct wmi_vring_ba_status_event *evt = d; 666 struct wmi_ba_status_event *evt = d;
652 struct vring_tx_data *txdata; 667 struct vring_tx_data *txdata;
653 668
654 wil_dbg_wmi(wil, "BACK[%d] %s {%d} timeout %d AMSDU%s\n", 669 wil_dbg_wmi(wil, "BACK[%d] %s {%d} timeout %d AMSDU%s\n",
@@ -834,10 +849,10 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
834 offsetof(struct wil6210_mbox_ring_desc, sync), 0); 849 offsetof(struct wil6210_mbox_ring_desc, sync), 0);
835 /* indicate */ 850 /* indicate */
836 if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) && 851 if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) &&
837 (len >= sizeof(struct wil6210_mbox_hdr_wmi))) { 852 (len >= sizeof(struct wmi_cmd_hdr))) {
838 struct wil6210_mbox_hdr_wmi *wmi = &evt->event.wmi; 853 struct wmi_cmd_hdr *wmi = &evt->event.wmi;
839 u16 id = le16_to_cpu(wmi->id); 854 u16 id = le16_to_cpu(wmi->command_id);
840 u32 tstamp = le32_to_cpu(wmi->timestamp); 855 u32 tstamp = le32_to_cpu(wmi->fw_timestamp);
841 spin_lock_irqsave(&wil->wmi_ev_lock, flags); 856 spin_lock_irqsave(&wil->wmi_ev_lock, flags);
842 if (wil->reply_id && wil->reply_id == id) { 857 if (wil->reply_id && wil->reply_id == id) {
843 if (wil->reply_buf) { 858 if (wil->reply_buf) {
@@ -947,7 +962,7 @@ int wmi_set_mac_address(struct wil6210_priv *wil, void *addr)
947} 962}
948 963
949int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, 964int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype,
950 u8 chan, u8 hidden_ssid) 965 u8 chan, u8 hidden_ssid, u8 is_go)
951{ 966{
952 int rc; 967 int rc;
953 968
@@ -958,9 +973,10 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype,
958 .channel = chan - 1, 973 .channel = chan - 1,
959 .pcp_max_assoc_sta = max_assoc_sta, 974 .pcp_max_assoc_sta = max_assoc_sta,
960 .hidden_ssid = hidden_ssid, 975 .hidden_ssid = hidden_ssid,
976 .is_go = is_go,
961 }; 977 };
962 struct { 978 struct {
963 struct wil6210_mbox_hdr_wmi wmi; 979 struct wmi_cmd_hdr wmi;
964 struct wmi_pcp_started_event evt; 980 struct wmi_pcp_started_event evt;
965 } __packed reply; 981 } __packed reply;
966 982
@@ -1014,7 +1030,7 @@ int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid)
1014{ 1030{
1015 int rc; 1031 int rc;
1016 struct { 1032 struct {
1017 struct wil6210_mbox_hdr_wmi wmi; 1033 struct wmi_cmd_hdr wmi;
1018 struct wmi_set_ssid_cmd cmd; 1034 struct wmi_set_ssid_cmd cmd;
1019 } __packed reply; 1035 } __packed reply;
1020 int len; /* reply.cmd.ssid_len in CPU order */ 1036 int len; /* reply.cmd.ssid_len in CPU order */
@@ -1047,7 +1063,7 @@ int wmi_get_channel(struct wil6210_priv *wil, int *channel)
1047{ 1063{
1048 int rc; 1064 int rc;
1049 struct { 1065 struct {
1050 struct wil6210_mbox_hdr_wmi wmi; 1066 struct wmi_cmd_hdr wmi;
1051 struct wmi_set_pcp_channel_cmd cmd; 1067 struct wmi_set_pcp_channel_cmd cmd;
1052 } __packed reply; 1068 } __packed reply;
1053 1069
@@ -1064,14 +1080,86 @@ int wmi_get_channel(struct wil6210_priv *wil, int *channel)
1064 return 0; 1080 return 0;
1065} 1081}
1066 1082
1067int wmi_p2p_cfg(struct wil6210_priv *wil, int channel) 1083int wmi_p2p_cfg(struct wil6210_priv *wil, int channel, int bi)
1068{ 1084{
1085 int rc;
1069 struct wmi_p2p_cfg_cmd cmd = { 1086 struct wmi_p2p_cfg_cmd cmd = {
1070 .discovery_mode = WMI_DISCOVERY_MODE_NON_OFFLOAD, 1087 .discovery_mode = WMI_DISCOVERY_MODE_PEER2PEER,
1088 .bcon_interval = cpu_to_le16(bi),
1071 .channel = channel - 1, 1089 .channel = channel - 1,
1072 }; 1090 };
1091 struct {
1092 struct wmi_cmd_hdr wmi;
1093 struct wmi_p2p_cfg_done_event evt;
1094 } __packed reply;
1095
1096 wil_dbg_wmi(wil, "sending WMI_P2P_CFG_CMDID\n");
1097
1098 rc = wmi_call(wil, WMI_P2P_CFG_CMDID, &cmd, sizeof(cmd),
1099 WMI_P2P_CFG_DONE_EVENTID, &reply, sizeof(reply), 300);
1100 if (!rc && reply.evt.status != WMI_FW_STATUS_SUCCESS) {
1101 wil_err(wil, "P2P_CFG failed. status %d\n", reply.evt.status);
1102 rc = -EINVAL;
1103 }
1073 1104
1074 return wmi_send(wil, WMI_P2P_CFG_CMDID, &cmd, sizeof(cmd)); 1105 return rc;
1106}
1107
1108int wmi_start_listen(struct wil6210_priv *wil)
1109{
1110 int rc;
1111 struct {
1112 struct wmi_cmd_hdr wmi;
1113 struct wmi_listen_started_event evt;
1114 } __packed reply;
1115
1116 wil_dbg_wmi(wil, "sending WMI_START_LISTEN_CMDID\n");
1117
1118 rc = wmi_call(wil, WMI_START_LISTEN_CMDID, NULL, 0,
1119 WMI_LISTEN_STARTED_EVENTID, &reply, sizeof(reply), 300);
1120 if (!rc && reply.evt.status != WMI_FW_STATUS_SUCCESS) {
1121 wil_err(wil, "device failed to start listen. status %d\n",
1122 reply.evt.status);
1123 rc = -EINVAL;
1124 }
1125
1126 return rc;
1127}
1128
1129int wmi_start_search(struct wil6210_priv *wil)
1130{
1131 int rc;
1132 struct {
1133 struct wmi_cmd_hdr wmi;
1134 struct wmi_search_started_event evt;
1135 } __packed reply;
1136
1137 wil_dbg_wmi(wil, "sending WMI_START_SEARCH_CMDID\n");
1138
1139 rc = wmi_call(wil, WMI_START_SEARCH_CMDID, NULL, 0,
1140 WMI_SEARCH_STARTED_EVENTID, &reply, sizeof(reply), 300);
1141 if (!rc && reply.evt.status != WMI_FW_STATUS_SUCCESS) {
1142 wil_err(wil, "device failed to start search. status %d\n",
1143 reply.evt.status);
1144 rc = -EINVAL;
1145 }
1146
1147 return rc;
1148}
1149
1150int wmi_stop_discovery(struct wil6210_priv *wil)
1151{
1152 int rc;
1153
1154 wil_dbg_wmi(wil, "sending WMI_DISCOVERY_STOP_CMDID\n");
1155
1156 rc = wmi_call(wil, WMI_DISCOVERY_STOP_CMDID, NULL, 0,
1157 WMI_DISCOVERY_STOPPED_EVENTID, NULL, 0, 100);
1158
1159 if (rc)
1160 wil_err(wil, "Failed to stop discovery\n");
1161
1162 return rc;
1075} 1163}
1076 1164
1077int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index, 1165int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
@@ -1155,7 +1243,7 @@ int wmi_rxon(struct wil6210_priv *wil, bool on)
1155{ 1243{
1156 int rc; 1244 int rc;
1157 struct { 1245 struct {
1158 struct wil6210_mbox_hdr_wmi wmi; 1246 struct wmi_cmd_hdr wmi;
1159 struct wmi_listen_started_event evt; 1247 struct wmi_listen_started_event evt;
1160 } __packed reply; 1248 } __packed reply;
1161 1249
@@ -1192,7 +1280,7 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
1192 .host_thrsh = cpu_to_le16(rx_ring_overflow_thrsh), 1280 .host_thrsh = cpu_to_le16(rx_ring_overflow_thrsh),
1193 }; 1281 };
1194 struct { 1282 struct {
1195 struct wil6210_mbox_hdr_wmi wmi; 1283 struct wmi_cmd_hdr wmi;
1196 struct wmi_cfg_rx_chain_done_event evt; 1284 struct wmi_cfg_rx_chain_done_event evt;
1197 } __packed evt; 1285 } __packed evt;
1198 int rc; 1286 int rc;
@@ -1246,7 +1334,7 @@ int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_bb, u32 *t_rf)
1246 .measure_mode = cpu_to_le32(TEMPERATURE_MEASURE_NOW), 1334 .measure_mode = cpu_to_le32(TEMPERATURE_MEASURE_NOW),
1247 }; 1335 };
1248 struct { 1336 struct {
1249 struct wil6210_mbox_hdr_wmi wmi; 1337 struct wmi_cmd_hdr wmi;
1250 struct wmi_temp_sense_done_event evt; 1338 struct wmi_temp_sense_done_event evt;
1251 } __packed reply; 1339 } __packed reply;
1252 1340
@@ -1272,7 +1360,7 @@ int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason,
1272 .disconnect_reason = cpu_to_le16(reason), 1360 .disconnect_reason = cpu_to_le16(reason),
1273 }; 1361 };
1274 struct { 1362 struct {
1275 struct wil6210_mbox_hdr_wmi wmi; 1363 struct wmi_cmd_hdr wmi;
1276 struct wmi_disconnect_event evt; 1364 struct wmi_disconnect_event evt;
1277 } __packed reply; 1365 } __packed reply;
1278 1366
@@ -1364,7 +1452,7 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil, u8 cid, u8 tid, u8 token,
1364 .ba_timeout = cpu_to_le16(timeout), 1452 .ba_timeout = cpu_to_le16(timeout),
1365 }; 1453 };
1366 struct { 1454 struct {
1367 struct wil6210_mbox_hdr_wmi wmi; 1455 struct wmi_cmd_hdr wmi;
1368 struct wmi_rcp_addba_resp_sent_event evt; 1456 struct wmi_rcp_addba_resp_sent_event evt;
1369 } __packed reply; 1457 } __packed reply;
1370 1458
@@ -1420,10 +1508,10 @@ static void wmi_event_handle(struct wil6210_priv *wil,
1420 u16 len = le16_to_cpu(hdr->len); 1508 u16 len = le16_to_cpu(hdr->len);
1421 1509
1422 if ((hdr->type == WIL_MBOX_HDR_TYPE_WMI) && 1510 if ((hdr->type == WIL_MBOX_HDR_TYPE_WMI) &&
1423 (len >= sizeof(struct wil6210_mbox_hdr_wmi))) { 1511 (len >= sizeof(struct wmi_cmd_hdr))) {
1424 struct wil6210_mbox_hdr_wmi *wmi = (void *)(&hdr[1]); 1512 struct wmi_cmd_hdr *wmi = (void *)(&hdr[1]);
1425 void *evt_data = (void *)(&wmi[1]); 1513 void *evt_data = (void *)(&wmi[1]);
1426 u16 id = le16_to_cpu(wmi->id); 1514 u16 id = le16_to_cpu(wmi->command_id);
1427 1515
1428 wil_dbg_wmi(wil, "Handle WMI 0x%04x (reply_id 0x%04x)\n", 1516 wil_dbg_wmi(wil, "Handle WMI 0x%04x (reply_id 0x%04x)\n",
1429 id, wil->reply_id); 1517 id, wil->reply_id);
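Throughout wmi.c the old mailbox WMI header is replaced by struct wmi_cmd_hdr, whose command_id and fw_timestamp fields are little-endian on the wire and converted with le16_to_cpu()/le32_to_cpu() before use. A standalone sketch of decoding such a header from raw bytes (not driver code); the le16()/le32() helpers stand in for the kernel accessors, and the example id is WMI_P2P_CFG_CMDID (0x910) from the command list below.

#include <stdint.h>
#include <stdio.h>

/* same layout as the driver's struct wmi_cmd_hdr: 8 bytes, packed */
struct wmi_cmd_hdr {
	uint8_t  mid;
	uint8_t  reserved;
	uint16_t command_id;	/* __le16 in the driver */
	uint32_t fw_timestamp;	/* __le32 in the driver */
} __attribute__((packed));

static uint16_t le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | p[1] << 8);
}

static uint32_t le32(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
	/* example bytes: mid 0, reserved 0, id 0x0910, fw_timestamp 1000 */
	uint8_t raw[8] = { 0x00, 0x00, 0x10, 0x09, 0xe8, 0x03, 0x00, 0x00 };

	printf("header is %zu bytes\n", sizeof(struct wmi_cmd_hdr));
	printf("mid %u id 0x%04x timestamp %u\n",
	       raw[0], (unsigned)le16(&raw[2]), (unsigned)le32(&raw[4]));
	return 0;
}

Decoding the raw bytes directly keeps the sketch independent of host endianness, which is exactly why the driver goes through le16_to_cpu()/le32_to_cpu() rather than reading the fields in place.
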
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index 6e90e78f1554..29865e0b5203 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. 2 * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
3 * Copyright (c) 2006-2012 Wilocity . 3 * Copyright (c) 2006-2012 Wilocity
4 * 4 *
5 * Permission to use, copy, modify, and/or distribute this software for any 5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above 6 * purpose with or without fee is hereby granted, provided that the above
@@ -17,187 +17,197 @@
17 17
18/* 18/*
19 * This file contains the definitions of the WMI protocol specified in the 19 * This file contains the definitions of the WMI protocol specified in the
20 * Wireless Module Interface (WMI) for the Wilocity 20 * Wireless Module Interface (WMI) for the Qualcomm
21 * MARLON 60 Gigabit wireless solution. 21 * 60 GHz wireless solution.
22 * It includes definitions of all the commands and events. 22 * It includes definitions of all the commands and events.
23 * Commands are messages from the host to the WM. 23 * Commands are messages from the host to the WM.
24 * Events are messages from the WM to the host. 24 * Events are messages from the WM to the host.
25 *
26 * This is an automatically generated file.
25 */ 27 */
26 28
27#ifndef __WILOCITY_WMI_H__ 29#ifndef __WILOCITY_WMI_H__
28#define __WILOCITY_WMI_H__ 30#define __WILOCITY_WMI_H__
29 31
30/* General */ 32/* General */
31#define WILOCITY_MAX_ASSOC_STA (8) 33#define WMI_MAX_ASSOC_STA (8)
32#define WILOCITY_DEFAULT_ASSOC_STA (1) 34#define WMI_DEFAULT_ASSOC_STA (1)
33#define WMI_MAC_LEN (6) 35#define WMI_MAC_LEN (6)
34#define WMI_PROX_RANGE_NUM (3) 36#define WMI_PROX_RANGE_NUM (3)
35#define WMI_MAX_LOSS_DMG_BEACONS (32) 37#define WMI_MAX_LOSS_DMG_BEACONS (20)
38
39/* Mailbox interface
40 * used for commands and events
41 */
42enum wmi_mid {
43 MID_DEFAULT = 0x00,
44 FIRST_DBG_MID_ID = 0x10,
45 LAST_DBG_MID_ID = 0xFE,
46 MID_BROADCAST = 0xFF,
47};
48
49/* WMI_CMD_HDR */
50struct wmi_cmd_hdr {
51 u8 mid;
52 u8 reserved;
53 __le16 command_id;
54 __le32 fw_timestamp;
55} __packed;
36 56
37/* List of Commands */ 57/* List of Commands */
38enum wmi_command_id { 58enum wmi_command_id {
39 WMI_CONNECT_CMDID = 0x0001, 59 WMI_CONNECT_CMDID = 0x01,
40 WMI_DISCONNECT_CMDID = 0x0003, 60 WMI_DISCONNECT_CMDID = 0x03,
41 WMI_DISCONNECT_STA_CMDID = 0x0004, 61 WMI_DISCONNECT_STA_CMDID = 0x04,
42 WMI_START_SCAN_CMDID = 0x0007, 62 WMI_START_SCAN_CMDID = 0x07,
43 WMI_SET_BSS_FILTER_CMDID = 0x0009, 63 WMI_SET_BSS_FILTER_CMDID = 0x09,
44 WMI_SET_PROBED_SSID_CMDID = 0x000a, 64 WMI_SET_PROBED_SSID_CMDID = 0x0A,
45 WMI_SET_LISTEN_INT_CMDID = 0x000b, 65 WMI_SET_LISTEN_INT_CMDID = 0x0B,
46 WMI_BCON_CTRL_CMDID = 0x000f, 66 WMI_BCON_CTRL_CMDID = 0x0F,
47 WMI_ADD_CIPHER_KEY_CMDID = 0x0016, 67 WMI_ADD_CIPHER_KEY_CMDID = 0x16,
48 WMI_DELETE_CIPHER_KEY_CMDID = 0x0017, 68 WMI_DELETE_CIPHER_KEY_CMDID = 0x17,
49 WMI_SET_APPIE_CMDID = 0x003f, 69 WMI_PCP_CONF_CMDID = 0x18,
50 WMI_SET_WSC_STATUS_CMDID = 0x0041, 70 WMI_SET_APPIE_CMDID = 0x3F,
51 WMI_PXMT_RANGE_CFG_CMDID = 0x0042, 71 WMI_SET_WSC_STATUS_CMDID = 0x41,
52 WMI_PXMT_SNR2_RANGE_CFG_CMDID = 0x0043, 72 WMI_PXMT_RANGE_CFG_CMDID = 0x42,
53/* WMI_FAST_MEM_ACC_MODE_CMDID = 0x0300, */ 73 WMI_PXMT_SNR2_RANGE_CFG_CMDID = 0x43,
54 WMI_MEM_READ_CMDID = 0x0800, 74 WMI_MEM_READ_CMDID = 0x800,
55 WMI_MEM_WR_CMDID = 0x0801, 75 WMI_MEM_WR_CMDID = 0x801,
56 WMI_ECHO_CMDID = 0x0803, 76 WMI_ECHO_CMDID = 0x803,
57 WMI_DEEP_ECHO_CMDID = 0x0804, 77 WMI_DEEP_ECHO_CMDID = 0x804,
58 WMI_CONFIG_MAC_CMDID = 0x0805, 78 WMI_CONFIG_MAC_CMDID = 0x805,
59 WMI_CONFIG_PHY_DEBUG_CMDID = 0x0806, 79 WMI_CONFIG_PHY_DEBUG_CMDID = 0x806,
60 WMI_ADD_DEBUG_TX_PCKT_CMDID = 0x0808, 80 WMI_ADD_DEBUG_TX_PCKT_CMDID = 0x808,
61 WMI_PHY_GET_STATISTICS_CMDID = 0x0809, 81 WMI_PHY_GET_STATISTICS_CMDID = 0x809,
62 WMI_FS_TUNE_CMDID = 0x080a, 82 WMI_FS_TUNE_CMDID = 0x80A,
63 WMI_CORR_MEASURE_CMDID = 0x080b, 83 WMI_CORR_MEASURE_CMDID = 0x80B,
64 WMI_READ_RSSI_CMDID = 0x080c, 84 WMI_READ_RSSI_CMDID = 0x80C,
65 WMI_TEMP_SENSE_CMDID = 0x080e, 85 WMI_TEMP_SENSE_CMDID = 0x80E,
66 WMI_DC_CALIB_CMDID = 0x080f, 86 WMI_DC_CALIB_CMDID = 0x80F,
67 WMI_SEND_TONE_CMDID = 0x0810, 87 WMI_SEND_TONE_CMDID = 0x810,
68 WMI_IQ_TX_CALIB_CMDID = 0x0811, 88 WMI_IQ_TX_CALIB_CMDID = 0x811,
69 WMI_IQ_RX_CALIB_CMDID = 0x0812, 89 WMI_IQ_RX_CALIB_CMDID = 0x812,
70 WMI_SET_UCODE_IDLE_CMDID = 0x0813, 90 WMI_SET_UCODE_IDLE_CMDID = 0x813,
71 WMI_SET_WORK_MODE_CMDID = 0x0815, 91 WMI_SET_WORK_MODE_CMDID = 0x815,
72 WMI_LO_LEAKAGE_CALIB_CMDID = 0x0816, 92 WMI_LO_LEAKAGE_CALIB_CMDID = 0x816,
73 WMI_MARLON_R_READ_CMDID = 0x0818, 93 WMI_MARLON_R_READ_CMDID = 0x818,
74 WMI_MARLON_R_WRITE_CMDID = 0x0819, 94 WMI_MARLON_R_WRITE_CMDID = 0x819,
75 WMI_MARLON_R_TXRX_SEL_CMDID = 0x081a, 95 WMI_MARLON_R_TXRX_SEL_CMDID = 0x81A,
76 MAC_IO_STATIC_PARAMS_CMDID = 0x081b, 96 MAC_IO_STATIC_PARAMS_CMDID = 0x81B,
77 MAC_IO_DYNAMIC_PARAMS_CMDID = 0x081c, 97 MAC_IO_DYNAMIC_PARAMS_CMDID = 0x81C,
78 WMI_SILENT_RSSI_CALIB_CMDID = 0x081d, 98 WMI_SILENT_RSSI_CALIB_CMDID = 0x81D,
79 WMI_RF_RX_TEST_CMDID = 0x081e, 99 WMI_RF_RX_TEST_CMDID = 0x81E,
80 WMI_CFG_RX_CHAIN_CMDID = 0x0820, 100 WMI_CFG_RX_CHAIN_CMDID = 0x820,
81 WMI_VRING_CFG_CMDID = 0x0821, 101 WMI_VRING_CFG_CMDID = 0x821,
82 WMI_BCAST_VRING_CFG_CMDID = 0x0822, 102 WMI_BCAST_VRING_CFG_CMDID = 0x822,
83 WMI_VRING_BA_EN_CMDID = 0x0823, 103 WMI_VRING_BA_EN_CMDID = 0x823,
84 WMI_VRING_BA_DIS_CMDID = 0x0824, 104 WMI_VRING_BA_DIS_CMDID = 0x824,
85 WMI_RCP_ADDBA_RESP_CMDID = 0x0825, 105 WMI_RCP_ADDBA_RESP_CMDID = 0x825,
86 WMI_RCP_DELBA_CMDID = 0x0826, 106 WMI_RCP_DELBA_CMDID = 0x826,
87 WMI_SET_SSID_CMDID = 0x0827, 107 WMI_SET_SSID_CMDID = 0x827,
88 WMI_GET_SSID_CMDID = 0x0828, 108 WMI_GET_SSID_CMDID = 0x828,
89 WMI_SET_PCP_CHANNEL_CMDID = 0x0829, 109 WMI_SET_PCP_CHANNEL_CMDID = 0x829,
90 WMI_GET_PCP_CHANNEL_CMDID = 0x082a, 110 WMI_GET_PCP_CHANNEL_CMDID = 0x82A,
91 WMI_SW_TX_REQ_CMDID = 0x082b, 111 WMI_SW_TX_REQ_CMDID = 0x82B,
92 WMI_READ_MAC_RXQ_CMDID = 0x0830, 112 WMI_READ_MAC_RXQ_CMDID = 0x830,
93 WMI_READ_MAC_TXQ_CMDID = 0x0831, 113 WMI_READ_MAC_TXQ_CMDID = 0x831,
94 WMI_WRITE_MAC_RXQ_CMDID = 0x0832, 114 WMI_WRITE_MAC_RXQ_CMDID = 0x832,
95 WMI_WRITE_MAC_TXQ_CMDID = 0x0833, 115 WMI_WRITE_MAC_TXQ_CMDID = 0x833,
96 WMI_WRITE_MAC_XQ_FIELD_CMDID = 0x0834, 116 WMI_WRITE_MAC_XQ_FIELD_CMDID = 0x834,
97 WMI_MLME_PUSH_CMDID = 0x0835, 117 WMI_MLME_PUSH_CMDID = 0x835,
98 WMI_BEAMFORMING_MGMT_CMDID = 0x0836, 118 WMI_BEAMFORMING_MGMT_CMDID = 0x836,
99 WMI_BF_TXSS_MGMT_CMDID = 0x0837, 119 WMI_BF_TXSS_MGMT_CMDID = 0x837,
100 WMI_BF_SM_MGMT_CMDID = 0x0838, 120 WMI_BF_SM_MGMT_CMDID = 0x838,
101 WMI_BF_RXSS_MGMT_CMDID = 0x0839, 121 WMI_BF_RXSS_MGMT_CMDID = 0x839,
102 WMI_BF_TRIG_CMDID = 0x083A, 122 WMI_BF_TRIG_CMDID = 0x83A,
103 WMI_SET_SECTORS_CMDID = 0x0849, 123 WMI_SET_SECTORS_CMDID = 0x849,
104 WMI_MAINTAIN_PAUSE_CMDID = 0x0850, 124 WMI_MAINTAIN_PAUSE_CMDID = 0x850,
105 WMI_MAINTAIN_RESUME_CMDID = 0x0851, 125 WMI_MAINTAIN_RESUME_CMDID = 0x851,
106 WMI_RS_MGMT_CMDID = 0x0852, 126 WMI_RS_MGMT_CMDID = 0x852,
107 WMI_RF_MGMT_CMDID = 0x0853, 127 WMI_RF_MGMT_CMDID = 0x853,
108 WMI_THERMAL_THROTTLING_CTRL_CMDID = 0x0854, 128 WMI_THERMAL_THROTTLING_CTRL_CMDID = 0x854,
109 WMI_THERMAL_THROTTLING_GET_STATUS_CMDID = 0x0855, 129 WMI_THERMAL_THROTTLING_GET_STATUS_CMDID = 0x855,
130 WMI_OTP_READ_CMDID = 0x856,
131 WMI_OTP_WRITE_CMDID = 0x857,
110 /* Performance monitoring commands */ 132 /* Performance monitoring commands */
111 WMI_BF_CTRL_CMDID = 0x0862, 133 WMI_BF_CTRL_CMDID = 0x862,
112 WMI_NOTIFY_REQ_CMDID = 0x0863, 134 WMI_NOTIFY_REQ_CMDID = 0x863,
113 WMI_GET_STATUS_CMDID = 0x0864, 135 WMI_GET_STATUS_CMDID = 0x864,
114 WMI_UNIT_TEST_CMDID = 0x0900, 136 WMI_UNIT_TEST_CMDID = 0x900,
115 WMI_HICCUP_CMDID = 0x0901, 137 WMI_HICCUP_CMDID = 0x901,
116 WMI_FLASH_READ_CMDID = 0x0902, 138 WMI_FLASH_READ_CMDID = 0x902,
117 WMI_FLASH_WRITE_CMDID = 0x0903, 139 WMI_FLASH_WRITE_CMDID = 0x903,
118 WMI_SECURITY_UNIT_TEST_CMDID = 0x0904, 140 /* P2P */
119 /*P2P*/ 141 WMI_P2P_CFG_CMDID = 0x910,
120 WMI_P2P_CFG_CMDID = 0x0910, 142 WMI_PORT_ALLOCATE_CMDID = 0x911,
121 WMI_PORT_ALLOCATE_CMDID = 0x0911, 143 WMI_PORT_DELETE_CMDID = 0x912,
122 WMI_PORT_DELETE_CMDID = 0x0912, 144 WMI_POWER_MGMT_CFG_CMDID = 0x913,
123 WMI_POWER_MGMT_CFG_CMDID = 0x0913, 145 WMI_START_LISTEN_CMDID = 0x914,
124 WMI_START_LISTEN_CMDID = 0x0914, 146 WMI_START_SEARCH_CMDID = 0x915,
125 WMI_START_SEARCH_CMDID = 0x0915, 147 WMI_DISCOVERY_START_CMDID = 0x916,
126 WMI_DISCOVERY_START_CMDID = 0x0916, 148 WMI_DISCOVERY_STOP_CMDID = 0x917,
127 WMI_DISCOVERY_STOP_CMDID = 0x0917, 149 WMI_PCP_START_CMDID = 0x918,
128 WMI_PCP_START_CMDID = 0x0918, 150 WMI_PCP_STOP_CMDID = 0x919,
129 WMI_PCP_STOP_CMDID = 0x0919, 151 WMI_GET_PCP_FACTOR_CMDID = 0x91B,
130 WMI_GET_PCP_FACTOR_CMDID = 0x091b, 152 WMI_SET_MAC_ADDRESS_CMDID = 0xF003,
131 153 WMI_ABORT_SCAN_CMDID = 0xF007,
132 WMI_SET_MAC_ADDRESS_CMDID = 0xf003, 154 WMI_SET_PROMISCUOUS_MODE_CMDID = 0xF041,
133 WMI_ABORT_SCAN_CMDID = 0xf007, 155 WMI_GET_PMK_CMDID = 0xF048,
134 WMI_SET_PMK_CMDID = 0xf028, 156 WMI_SET_PASSPHRASE_CMDID = 0xF049,
135 157 WMI_SEND_ASSOC_RES_CMDID = 0xF04A,
136 WMI_SET_PROMISCUOUS_MODE_CMDID = 0xf041, 158 WMI_SET_ASSOC_REQ_RELAY_CMDID = 0xF04B,
137 WMI_GET_PMK_CMDID = 0xf048, 159 WMI_MAC_ADDR_REQ_CMDID = 0xF04D,
138 WMI_SET_PASSPHRASE_CMDID = 0xf049, 160 WMI_FW_VER_CMDID = 0xF04E,
139 WMI_SEND_ASSOC_RES_CMDID = 0xf04a, 161 WMI_PMC_CMDID = 0xF04F,
140 WMI_SET_ASSOC_REQ_RELAY_CMDID = 0xf04b,
141 WMI_EAPOL_TX_CMDID = 0xf04c,
142 WMI_MAC_ADDR_REQ_CMDID = 0xf04d,
143 WMI_FW_VER_CMDID = 0xf04e,
144 WMI_PMC_CMDID = 0xf04f,
145}; 162};
146 163
147/* 164/* WMI_CONNECT_CMDID */
148 * Commands data structures
149 */
150
151/*
152 * WMI_CONNECT_CMDID
153 */
154enum wmi_network_type { 165enum wmi_network_type {
155 WMI_NETTYPE_INFRA = 0x01, 166 WMI_NETTYPE_INFRA = 0x01,
156 WMI_NETTYPE_ADHOC = 0x02, 167 WMI_NETTYPE_ADHOC = 0x02,
157 WMI_NETTYPE_ADHOC_CREATOR = 0x04, 168 WMI_NETTYPE_ADHOC_CREATOR = 0x04,
158 WMI_NETTYPE_AP = 0x10, 169 WMI_NETTYPE_AP = 0x10,
159 WMI_NETTYPE_P2P = 0x20, 170 WMI_NETTYPE_P2P = 0x20,
160 WMI_NETTYPE_WBE = 0x40, /* PCIE over 60g */ 171 /* PCIE over 60g */
172 WMI_NETTYPE_WBE = 0x40,
161}; 173};
162 174
163enum wmi_dot11_auth_mode { 175enum wmi_dot11_auth_mode {
164 WMI_AUTH11_OPEN = 0x01, 176 WMI_AUTH11_OPEN = 0x01,
165 WMI_AUTH11_SHARED = 0x02, 177 WMI_AUTH11_SHARED = 0x02,
166 WMI_AUTH11_LEAP = 0x04, 178 WMI_AUTH11_LEAP = 0x04,
167 WMI_AUTH11_WSC = 0x08, 179 WMI_AUTH11_WSC = 0x08,
168}; 180};
169 181
170enum wmi_auth_mode { 182enum wmi_auth_mode {
171 WMI_AUTH_NONE = 0x01, 183 WMI_AUTH_NONE = 0x01,
172 WMI_AUTH_WPA = 0x02, 184 WMI_AUTH_WPA = 0x02,
173 WMI_AUTH_WPA2 = 0x04, 185 WMI_AUTH_WPA2 = 0x04,
174 WMI_AUTH_WPA_PSK = 0x08, 186 WMI_AUTH_WPA_PSK = 0x08,
175 WMI_AUTH_WPA2_PSK = 0x10, 187 WMI_AUTH_WPA2_PSK = 0x10,
176 WMI_AUTH_WPA_CCKM = 0x20, 188 WMI_AUTH_WPA_CCKM = 0x20,
177 WMI_AUTH_WPA2_CCKM = 0x40, 189 WMI_AUTH_WPA2_CCKM = 0x40,
178}; 190};
179 191
180enum wmi_crypto_type { 192enum wmi_crypto_type {
181 WMI_CRYPT_NONE = 0x01, 193 WMI_CRYPT_NONE = 0x01,
182 WMI_CRYPT_WEP = 0x02, 194 WMI_CRYPT_AES_GCMP = 0x20,
183 WMI_CRYPT_TKIP = 0x04,
184 WMI_CRYPT_AES = 0x08,
185 WMI_CRYPT_AES_GCMP = 0x20,
186}; 195};
187 196
188enum wmi_connect_ctrl_flag_bits { 197enum wmi_connect_ctrl_flag_bits {
189 WMI_CONNECT_ASSOC_POLICY_USER = 0x0001, 198 WMI_CONNECT_ASSOC_POLICY_USER = 0x01,
190 WMI_CONNECT_SEND_REASSOC = 0x0002, 199 WMI_CONNECT_SEND_REASSOC = 0x02,
191 WMI_CONNECT_IGNORE_WPA_GROUP_CIPHER = 0x0004, 200 WMI_CONNECT_IGNORE_WPA_GROUP_CIPHER = 0x04,
192 WMI_CONNECT_PROFILE_MATCH_DONE = 0x0008, 201 WMI_CONNECT_PROFILE_MATCH_DONE = 0x08,
193 WMI_CONNECT_IGNORE_AAC_BEACON = 0x0010, 202 WMI_CONNECT_IGNORE_AAC_BEACON = 0x10,
194 WMI_CONNECT_CSA_FOLLOW_BSS = 0x0020, 203 WMI_CONNECT_CSA_FOLLOW_BSS = 0x20,
195 WMI_CONNECT_DO_WPA_OFFLOAD = 0x0040, 204 WMI_CONNECT_DO_WPA_OFFLOAD = 0x40,
196 WMI_CONNECT_DO_NOT_DEAUTH = 0x0080, 205 WMI_CONNECT_DO_NOT_DEAUTH = 0x80,
197}; 206};
198 207
199#define WMI_MAX_SSID_LEN (32) 208#define WMI_MAX_SSID_LEN (32)
200 209
210/* WMI_CONNECT_CMDID */
201struct wmi_connect_cmd { 211struct wmi_connect_cmd {
202 u8 network_type; 212 u8 network_type;
203 u8 dot11_auth_mode; 213 u8 dot11_auth_mode;
@@ -216,31 +226,17 @@ struct wmi_connect_cmd {
216 u8 reserved1[2]; 226 u8 reserved1[2];
217} __packed; 227} __packed;
218 228
219/* 229/* WMI_DISCONNECT_STA_CMDID */
220 * WMI_DISCONNECT_STA_CMDID
221 */
222struct wmi_disconnect_sta_cmd { 230struct wmi_disconnect_sta_cmd {
223 u8 dst_mac[WMI_MAC_LEN]; 231 u8 dst_mac[WMI_MAC_LEN];
224 __le16 disconnect_reason; 232 __le16 disconnect_reason;
225} __packed; 233} __packed;
226 234
227/*
228 * WMI_SET_PMK_CMDID
229 */
230
231#define WMI_MIN_KEY_INDEX (0)
232#define WMI_MAX_KEY_INDEX (3) 235#define WMI_MAX_KEY_INDEX (3)
233#define WMI_MAX_KEY_LEN (32) 236#define WMI_MAX_KEY_LEN (32)
234#define WMI_PASSPHRASE_LEN (64) 237#define WMI_PASSPHRASE_LEN (64)
235#define WMI_PMK_LEN (32)
236
237struct wmi_set_pmk_cmd {
238 u8 pmk[WMI_PMK_LEN];
239} __packed;
240 238
241/* 239/* WMI_SET_PASSPHRASE_CMDID */
242 * WMI_SET_PASSPHRASE_CMDID
243 */
244struct wmi_set_passphrase_cmd { 240struct wmi_set_passphrase_cmd {
245 u8 ssid[WMI_MAX_SSID_LEN]; 241 u8 ssid[WMI_MAX_SSID_LEN];
246 u8 passphrase[WMI_PASSPHRASE_LEN]; 242 u8 passphrase[WMI_PASSPHRASE_LEN];
@@ -248,36 +244,34 @@ struct wmi_set_passphrase_cmd {
248 u8 passphrase_len; 244 u8 passphrase_len;
249} __packed; 245} __packed;
250 246
251/* 247/* WMI_ADD_CIPHER_KEY_CMDID */
252 * WMI_ADD_CIPHER_KEY_CMDID
253 */
254enum wmi_key_usage { 248enum wmi_key_usage {
255 WMI_KEY_USE_PAIRWISE = 0, 249 WMI_KEY_USE_PAIRWISE = 0x00,
256 WMI_KEY_USE_RX_GROUP = 1, 250 WMI_KEY_USE_RX_GROUP = 0x01,
257 WMI_KEY_USE_TX_GROUP = 2, 251 WMI_KEY_USE_TX_GROUP = 0x02,
258}; 252};
259 253
260struct wmi_add_cipher_key_cmd { 254struct wmi_add_cipher_key_cmd {
261 u8 key_index; 255 u8 key_index;
262 u8 key_type; 256 u8 key_type;
263 u8 key_usage; /* enum wmi_key_usage */ 257 /* enum wmi_key_usage */
258 u8 key_usage;
264 u8 key_len; 259 u8 key_len;
265 u8 key_rsc[8]; /* key replay sequence counter */ 260 /* key replay sequence counter */
261 u8 key_rsc[8];
266 u8 key[WMI_MAX_KEY_LEN]; 262 u8 key[WMI_MAX_KEY_LEN];
267 u8 key_op_ctrl; /* Additional Key Control information */ 263 /* Additional Key Control information */
264 u8 key_op_ctrl;
268 u8 mac[WMI_MAC_LEN]; 265 u8 mac[WMI_MAC_LEN];
269} __packed; 266} __packed;
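The add-cipher-key command above is a fixed-size structure, so installing, say, a pairwise key amounts to filling in the index, usage and key material and leaving the group-key fields zeroed. The sketch below is illustrative only and not part of this patch; kernel context and the definitions in this header are assumed, and wmi_send() stands in for whatever transport helper the driver uses.

/* Illustrative sketch: install a pairwise key for a peer station.
 * wmi_send() is a hypothetical "send WMI command" helper.
 */
static int example_add_pairwise_key(void *wmi_handle, const u8 *peer_mac,
                                    u8 key_index, const u8 *key, u8 key_len)
{
        struct wmi_add_cipher_key_cmd cmd = {
                .key_index = key_index,
                .key_usage = WMI_KEY_USE_PAIRWISE,  /* enum wmi_key_usage */
                .key_len = key_len,
        };

        if (key_index > WMI_MAX_KEY_INDEX || key_len > WMI_MAX_KEY_LEN)
                return -EINVAL;

        memcpy(cmd.key, key, key_len);
        memcpy(cmd.mac, peer_mac, WMI_MAC_LEN);

        return wmi_send(wmi_handle, WMI_ADD_CIPHER_KEY_CMDID, &cmd, sizeof(cmd));
}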
270 267
271/* 268/* WMI_DELETE_CIPHER_KEY_CMDID */
272 * WMI_DELETE_CIPHER_KEY_CMDID
273 */
274struct wmi_delete_cipher_key_cmd { 269struct wmi_delete_cipher_key_cmd {
275 u8 key_index; 270 u8 key_index;
276 u8 mac[WMI_MAC_LEN]; 271 u8 mac[WMI_MAC_LEN];
277} __packed; 272} __packed;
278 273
279/* 274/* WMI_START_SCAN_CMDID
280 * WMI_START_SCAN_CMDID
281 * 275 *
282 * Start L1 scan operation 276 * Start L1 scan operation
283 * 277 *
@@ -286,146 +280,142 @@ struct wmi_delete_cipher_key_cmd {
286 * - WMI_SCAN_COMPLETE_EVENTID 280 * - WMI_SCAN_COMPLETE_EVENTID
287 */ 281 */
288enum wmi_scan_type { 282enum wmi_scan_type {
289 WMI_LONG_SCAN = 0, 283 WMI_ACTIVE_SCAN = 0x00,
290 WMI_SHORT_SCAN = 1, 284 WMI_SHORT_SCAN = 0x01,
291 WMI_PBC_SCAN = 2, 285 WMI_PASSIVE_SCAN = 0x02,
292 WMI_DIRECT_SCAN = 3, 286 WMI_DIRECT_SCAN = 0x03,
293 WMI_ACTIVE_SCAN = 4, 287 WMI_LONG_SCAN = 0x04,
294}; 288};
295 289
290/* WMI_START_SCAN_CMDID */
296struct wmi_start_scan_cmd { 291struct wmi_start_scan_cmd {
297 u8 direct_scan_mac_addr[6]; 292 u8 direct_scan_mac_addr[WMI_MAC_LEN];
298 u8 reserved[2]; 293 /* DMG Beacon frame is transmitted during active scanning */
299 __le32 home_dwell_time; /* Max duration in the home channel(ms) */ 294 u8 discovery_mode;
300 __le32 force_scan_interval; /* Time interval between scans (ms)*/ 295 /* reserved */
301 u8 scan_type; /* wmi_scan_type */ 296 u8 reserved;
302 u8 num_channels; /* how many channels follow */ 297 /* Max duration in the home channel(ms) */
298 __le32 dwell_time;
299 /* Time interval between scans (ms) */
300 __le32 force_scan_interval;
301 /* enum wmi_scan_type */
302 u8 scan_type;
303 /* how many channels follow */
304 u8 num_channels;
305 /* channels ID's:
306 * 0 - 58320 MHz
307 * 1 - 60480 MHz
308 * 2 - 62640 MHz
309 */
303 struct { 310 struct {
304 u8 channel; 311 u8 channel;
305 u8 reserved; 312 u8 reserved;
306 } channel_list[0]; /* channels ID's */ 313 } channel_list[0];
307 /* 0 - 58320 MHz */
308 /* 1 - 60480 MHz */
309 /* 2 - 62640 MHz */
310} __packed; 314} __packed;
311 315
312/* 316/* WMI_SET_PROBED_SSID_CMDID */
313 * WMI_SET_PROBED_SSID_CMDID
314 */
315#define MAX_PROBED_SSID_INDEX (3) 317#define MAX_PROBED_SSID_INDEX (3)
316 318
317enum wmi_ssid_flag { 319enum wmi_ssid_flag {
318 WMI_SSID_FLAG_DISABLE = 0, /* disables entry */ 320 /* disables entry */
319 WMI_SSID_FLAG_SPECIFIC = 1, /* probes specified ssid */ 321 WMI_SSID_FLAG_DISABLE = 0x00,
320 WMI_SSID_FLAG_ANY = 2, /* probes for any ssid */ 322 /* probes specified ssid */
323 WMI_SSID_FLAG_SPECIFIC = 0x01,
324 /* probes for any ssid */
325 WMI_SSID_FLAG_ANY = 0x02,
321}; 326};
322 327
323struct wmi_probed_ssid_cmd { 328struct wmi_probed_ssid_cmd {
324 u8 entry_index; /* 0 to MAX_PROBED_SSID_INDEX */ 329 /* 0 to MAX_PROBED_SSID_INDEX */
325 u8 flag; /* enum wmi_ssid_flag */ 330 u8 entry_index;
331 /* enum wmi_ssid_flag */
332 u8 flag;
326 u8 ssid_len; 333 u8 ssid_len;
327 u8 ssid[WMI_MAX_SSID_LEN]; 334 u8 ssid[WMI_MAX_SSID_LEN];
328} __packed; 335} __packed;
329 336
330/* 337/* WMI_SET_APPIE_CMDID
331 * WMI_SET_APPIE_CMDID
332 * Add Application specified IE to a management frame 338 * Add Application specified IE to a management frame
333 */ 339 */
334#define WMI_MAX_IE_LEN (1024) 340#define WMI_MAX_IE_LEN (1024)
335 341
336/* 342/* Frame Types */
337 * Frame Types
338 */
339enum wmi_mgmt_frame_type { 343enum wmi_mgmt_frame_type {
340 WMI_FRAME_BEACON = 0, 344 WMI_FRAME_BEACON = 0x00,
341 WMI_FRAME_PROBE_REQ = 1, 345 WMI_FRAME_PROBE_REQ = 0x01,
342 WMI_FRAME_PROBE_RESP = 2, 346 WMI_FRAME_PROBE_RESP = 0x02,
343 WMI_FRAME_ASSOC_REQ = 3, 347 WMI_FRAME_ASSOC_REQ = 0x03,
344 WMI_FRAME_ASSOC_RESP = 4, 348 WMI_FRAME_ASSOC_RESP = 0x04,
345 WMI_NUM_MGMT_FRAME, 349 WMI_NUM_MGMT_FRAME = 0x05,
346}; 350};
347 351
348struct wmi_set_appie_cmd { 352struct wmi_set_appie_cmd {
349 u8 mgmt_frm_type; /* enum wmi_mgmt_frame_type */ 353 /* enum wmi_mgmt_frame_type */
354 u8 mgmt_frm_type;
350 u8 reserved; 355 u8 reserved;
351 __le16 ie_len; /* Length of the IE to be added to MGMT frame */ 356 /* Length of the IE to be added to MGMT frame */
357 __le16 ie_len;
352 u8 ie_info[0]; 358 u8 ie_info[0];
353} __packed; 359} __packed;
354 360
355/* 361/* WMI_PXMT_RANGE_CFG_CMDID */
356 * WMI_PXMT_RANGE_CFG_CMDID
357 */
358struct wmi_pxmt_range_cfg_cmd { 362struct wmi_pxmt_range_cfg_cmd {
359 u8 dst_mac[WMI_MAC_LEN]; 363 u8 dst_mac[WMI_MAC_LEN];
360 __le16 range; 364 __le16 range;
361} __packed; 365} __packed;
362 366
363/* 367/* WMI_PXMT_SNR2_RANGE_CFG_CMDID */
364 * WMI_PXMT_SNR2_RANGE_CFG_CMDID
365 */
366struct wmi_pxmt_snr2_range_cfg_cmd { 368struct wmi_pxmt_snr2_range_cfg_cmd {
367 s8 snr2range_arr[WMI_PROX_RANGE_NUM-1]; 369 s8 snr2range_arr[2];
368} __packed; 370} __packed;
369 371
370/* 372/* WMI_RF_MGMT_CMDID */
371 * WMI_RF_MGMT_CMDID
372 */
373enum wmi_rf_mgmt_type { 373enum wmi_rf_mgmt_type {
374 WMI_RF_MGMT_W_DISABLE = 0, 374 WMI_RF_MGMT_W_DISABLE = 0x00,
375 WMI_RF_MGMT_W_ENABLE = 1, 375 WMI_RF_MGMT_W_ENABLE = 0x01,
376 WMI_RF_MGMT_GET_STATUS = 2, 376 WMI_RF_MGMT_GET_STATUS = 0x02,
377}; 377};
378 378
379/* WMI_RF_MGMT_CMDID */
379struct wmi_rf_mgmt_cmd { 380struct wmi_rf_mgmt_cmd {
380 __le32 rf_mgmt_type; 381 __le32 rf_mgmt_type;
381} __packed; 382} __packed;
382 383
383/* 384/* WMI_THERMAL_THROTTLING_CTRL_CMDID */
384 * WMI_THERMAL_THROTTLING_CTRL_CMDID
385 */
386#define THERMAL_THROTTLING_USE_DEFAULT_MAX_TXOP_LENGTH (0xFFFFFFFF) 385#define THERMAL_THROTTLING_USE_DEFAULT_MAX_TXOP_LENGTH (0xFFFFFFFF)
387 386
387/* WMI_THERMAL_THROTTLING_CTRL_CMDID */
388struct wmi_thermal_throttling_ctrl_cmd { 388struct wmi_thermal_throttling_ctrl_cmd {
389 __le32 time_on_usec; 389 __le32 time_on_usec;
390 __le32 time_off_usec; 390 __le32 time_off_usec;
391 __le32 max_txop_length_usec; 391 __le32 max_txop_length_usec;
392} __packed; 392} __packed;
393 393
394/* 394/* WMI_RF_RX_TEST_CMDID */
395 * WMI_RF_RX_TEST_CMDID
396 */
397struct wmi_rf_rx_test_cmd { 395struct wmi_rf_rx_test_cmd {
398 __le32 sector; 396 __le32 sector;
399} __packed; 397} __packed;
400 398
401/* 399/* WMI_CORR_MEASURE_CMDID */
402 * WMI_CORR_MEASURE_CMDID
403 */
404struct wmi_corr_measure_cmd { 400struct wmi_corr_measure_cmd {
405 s32 freq_mhz; 401 __le32 freq_mhz;
406 __le32 length_samples; 402 __le32 length_samples;
407 __le32 iterations; 403 __le32 iterations;
408} __packed; 404} __packed;
409 405
410/* 406/* WMI_SET_SSID_CMDID */
411 * WMI_SET_SSID_CMDID
412 */
413struct wmi_set_ssid_cmd { 407struct wmi_set_ssid_cmd {
414 __le32 ssid_len; 408 __le32 ssid_len;
415 u8 ssid[WMI_MAX_SSID_LEN]; 409 u8 ssid[WMI_MAX_SSID_LEN];
416} __packed; 410} __packed;
417 411
418/* 412/* WMI_SET_PCP_CHANNEL_CMDID */
419 * WMI_SET_PCP_CHANNEL_CMDID
420 */
421struct wmi_set_pcp_channel_cmd { 413struct wmi_set_pcp_channel_cmd {
422 u8 channel; 414 u8 channel;
423 u8 reserved[3]; 415 u8 reserved[3];
424} __packed; 416} __packed;
425 417
426/* 418/* WMI_BCON_CTRL_CMDID */
427 * WMI_BCON_CTRL_CMDID
428 */
429struct wmi_bcon_ctrl_cmd { 419struct wmi_bcon_ctrl_cmd {
430 __le16 bcon_interval; 420 __le16 bcon_interval;
431 __le16 frag_num; 421 __le16 frag_num;
@@ -434,214 +424,192 @@ struct wmi_bcon_ctrl_cmd {
434 u8 pcp_max_assoc_sta; 424 u8 pcp_max_assoc_sta;
435 u8 disable_sec_offload; 425 u8 disable_sec_offload;
436 u8 disable_sec; 426 u8 disable_sec;
427 u8 hidden_ssid;
428 u8 is_go;
429 u8 reserved[2];
437} __packed; 430} __packed;
438 431
439/******* P2P ***********/ 432/* WMI_PORT_ALLOCATE_CMDID */
440
441/*
442 * WMI_PORT_ALLOCATE_CMDID
443 */
444enum wmi_port_role { 433enum wmi_port_role {
445 WMI_PORT_STA = 0, 434 WMI_PORT_STA = 0x00,
446 WMI_PORT_PCP = 1, 435 WMI_PORT_PCP = 0x01,
447 WMI_PORT_AP = 2, 436 WMI_PORT_AP = 0x02,
448 WMI_PORT_P2P_DEV = 3, 437 WMI_PORT_P2P_DEV = 0x03,
449 WMI_PORT_P2P_CLIENT = 4, 438 WMI_PORT_P2P_CLIENT = 0x04,
450 WMI_PORT_P2P_GO = 5, 439 WMI_PORT_P2P_GO = 0x05,
451}; 440};
452 441
442/* WMI_PORT_ALLOCATE_CMDID */
453struct wmi_port_allocate_cmd { 443struct wmi_port_allocate_cmd {
454 u8 mac[WMI_MAC_LEN]; 444 u8 mac[WMI_MAC_LEN];
455 u8 port_role; 445 u8 port_role;
456 u8 mid; 446 u8 mid;
457} __packed; 447} __packed;
458 448
459/* 449/* WMI_PORT_DELETE_CMDID */
460 * WMI_PORT_DELETE_CMDID 450struct wmi_port_delete_cmd {
461 */
462struct wmi_delete_port_cmd {
463 u8 mid; 451 u8 mid;
464 u8 reserved[3]; 452 u8 reserved[3];
465} __packed; 453} __packed;
466 454
467/* 455/* WMI_P2P_CFG_CMDID */
468 * WMI_P2P_CFG_CMDID
469 */
470enum wmi_discovery_mode { 456enum wmi_discovery_mode {
471 WMI_DISCOVERY_MODE_NON_OFFLOAD = 0, 457 WMI_DISCOVERY_MODE_NON_OFFLOAD = 0x00,
472 WMI_DISCOVERY_MODE_OFFLOAD = 1, 458 WMI_DISCOVERY_MODE_OFFLOAD = 0x01,
473 WMI_DISCOVERY_MODE_PEER2PEER = 2, 459 WMI_DISCOVERY_MODE_PEER2PEER = 0x02,
474}; 460};
475 461
476struct wmi_p2p_cfg_cmd { 462struct wmi_p2p_cfg_cmd {
477 u8 discovery_mode; /* wmi_discovery_mode */ 463 /* enum wmi_discovery_mode */
464 u8 discovery_mode;
478 u8 channel; 465 u8 channel;
479 __le16 bcon_interval; /* base to listen/search duration calculation */ 466 /* base to listen/search duration calculation */
467 __le16 bcon_interval;
480} __packed; 468} __packed;
481 469
482/* 470/* WMI_POWER_MGMT_CFG_CMDID */
483 * WMI_POWER_MGMT_CFG_CMDID
484 */
485enum wmi_power_source_type { 471enum wmi_power_source_type {
486 WMI_POWER_SOURCE_BATTERY = 0, 472 WMI_POWER_SOURCE_BATTERY = 0x00,
487 WMI_POWER_SOURCE_OTHER = 1, 473 WMI_POWER_SOURCE_OTHER = 0x01,
488}; 474};
489 475
490struct wmi_power_mgmt_cfg_cmd { 476struct wmi_power_mgmt_cfg_cmd {
491 u8 power_source; /* wmi_power_source_type */ 477 /* enum wmi_power_source_type */
478 u8 power_source;
492 u8 reserved[3]; 479 u8 reserved[3];
493} __packed; 480} __packed;
494 481
495/* 482/* WMI_PCP_START_CMDID */
496 * WMI_PCP_START_CMDID
497 */
498
499enum wmi_hidden_ssid {
500 WMI_HIDDEN_SSID_DISABLED = 0,
501 WMI_HIDDEN_SSID_SEND_EMPTY = 1,
502 WMI_HIDDEN_SSID_CLEAR = 2,
503};
504
505struct wmi_pcp_start_cmd { 483struct wmi_pcp_start_cmd {
506 __le16 bcon_interval; 484 __le16 bcon_interval;
507 u8 pcp_max_assoc_sta; 485 u8 pcp_max_assoc_sta;
508 u8 hidden_ssid; 486 u8 hidden_ssid;
509 u8 reserved0[8]; 487 u8 is_go;
488 u8 reserved0[7];
510 u8 network_type; 489 u8 network_type;
511 u8 channel; 490 u8 channel;
512 u8 disable_sec_offload; 491 u8 disable_sec_offload;
513 u8 disable_sec; 492 u8 disable_sec;
514} __packed; 493} __packed;
515 494
516/* 495/* WMI_SW_TX_REQ_CMDID */
517 * WMI_SW_TX_REQ_CMDID
518 */
519struct wmi_sw_tx_req_cmd { 496struct wmi_sw_tx_req_cmd {
520 u8 dst_mac[WMI_MAC_LEN]; 497 u8 dst_mac[WMI_MAC_LEN];
521 __le16 len; 498 __le16 len;
522 u8 payload[0]; 499 u8 payload[0];
523} __packed; 500} __packed;
524 501
525/*
526 * WMI_VRING_CFG_CMDID
527 */
528
529struct wmi_sw_ring_cfg { 502struct wmi_sw_ring_cfg {
530 __le64 ring_mem_base; 503 __le64 ring_mem_base;
531 __le16 ring_size; 504 __le16 ring_size;
532 __le16 max_mpdu_size; 505 __le16 max_mpdu_size;
533} __packed; 506} __packed;
534 507
508/* wmi_vring_cfg_schd */
535struct wmi_vring_cfg_schd { 509struct wmi_vring_cfg_schd {
536 __le16 priority; 510 __le16 priority;
537 __le16 timeslot_us; 511 __le16 timeslot_us;
538} __packed; 512} __packed;
539 513
540enum wmi_vring_cfg_encap_trans_type { 514enum wmi_vring_cfg_encap_trans_type {
541 WMI_VRING_ENC_TYPE_802_3 = 0, 515 WMI_VRING_ENC_TYPE_802_3 = 0x00,
542 WMI_VRING_ENC_TYPE_NATIVE_WIFI = 1, 516 WMI_VRING_ENC_TYPE_NATIVE_WIFI = 0x01,
543}; 517};
544 518
545enum wmi_vring_cfg_ds_cfg { 519enum wmi_vring_cfg_ds_cfg {
546 WMI_VRING_DS_PBSS = 0, 520 WMI_VRING_DS_PBSS = 0x00,
547 WMI_VRING_DS_STATION = 1, 521 WMI_VRING_DS_STATION = 0x01,
548 WMI_VRING_DS_AP = 2, 522 WMI_VRING_DS_AP = 0x02,
549 WMI_VRING_DS_ADDR4 = 3, 523 WMI_VRING_DS_ADDR4 = 0x03,
550}; 524};
551 525
552enum wmi_vring_cfg_nwifi_ds_trans_type { 526enum wmi_vring_cfg_nwifi_ds_trans_type {
553 WMI_NWIFI_TX_TRANS_MODE_NO = 0, 527 WMI_NWIFI_TX_TRANS_MODE_NO = 0x00,
554 WMI_NWIFI_TX_TRANS_MODE_AP2PBSS = 1, 528 WMI_NWIFI_TX_TRANS_MODE_AP2PBSS = 0x01,
555 WMI_NWIFI_TX_TRANS_MODE_STA2PBSS = 2, 529 WMI_NWIFI_TX_TRANS_MODE_STA2PBSS = 0x02,
556}; 530};
557 531
558enum wmi_vring_cfg_schd_params_priority { 532enum wmi_vring_cfg_schd_params_priority {
559 WMI_SCH_PRIO_REGULAR = 0, 533 WMI_SCH_PRIO_REGULAR = 0x00,
560 WMI_SCH_PRIO_HIGH = 1, 534 WMI_SCH_PRIO_HIGH = 0x01,
561}; 535};
562 536
563#define CIDXTID_CID_POS (0) 537#define CIDXTID_CID_POS (0)
564#define CIDXTID_CID_LEN (4) 538#define CIDXTID_CID_LEN (4)
565#define CIDXTID_CID_MSK (0xF) 539#define CIDXTID_CID_MSK (0xF)
566#define CIDXTID_TID_POS (4) 540#define CIDXTID_TID_POS (4)
567#define CIDXTID_TID_LEN (4) 541#define CIDXTID_TID_LEN (4)
568#define CIDXTID_TID_MSK (0xF0) 542#define CIDXTID_TID_MSK (0xF0)
543#define VRING_CFG_MAC_CTRL_LIFETIME_EN_POS (0)
544#define VRING_CFG_MAC_CTRL_LIFETIME_EN_LEN (1)
545#define VRING_CFG_MAC_CTRL_LIFETIME_EN_MSK (0x1)
546#define VRING_CFG_MAC_CTRL_AGGR_EN_POS (1)
547#define VRING_CFG_MAC_CTRL_AGGR_EN_LEN (1)
548#define VRING_CFG_MAC_CTRL_AGGR_EN_MSK (0x2)
549#define VRING_CFG_TO_RESOLUTION_VALUE_POS (0)
550#define VRING_CFG_TO_RESOLUTION_VALUE_LEN (6)
551#define VRING_CFG_TO_RESOLUTION_VALUE_MSK (0x3F)
569 552
570struct wmi_vring_cfg { 553struct wmi_vring_cfg {
571 struct wmi_sw_ring_cfg tx_sw_ring; 554 struct wmi_sw_ring_cfg tx_sw_ring;
572 u8 ringid; /* 0-23 vrings */ 555 /* 0-23 vrings */
573 556 u8 ringid;
574 u8 cidxtid; 557 u8 cidxtid;
575
576 u8 encap_trans_type; 558 u8 encap_trans_type;
577 u8 ds_cfg; /* 802.3 DS cfg */ 559 /* 802.3 DS cfg */
560 u8 ds_cfg;
578 u8 nwifi_ds_trans_type; 561 u8 nwifi_ds_trans_type;
579
580 #define VRING_CFG_MAC_CTRL_LIFETIME_EN_POS (0)
581 #define VRING_CFG_MAC_CTRL_LIFETIME_EN_LEN (1)
582 #define VRING_CFG_MAC_CTRL_LIFETIME_EN_MSK (0x1)
583 #define VRING_CFG_MAC_CTRL_AGGR_EN_POS (1)
584 #define VRING_CFG_MAC_CTRL_AGGR_EN_LEN (1)
585 #define VRING_CFG_MAC_CTRL_AGGR_EN_MSK (0x2)
586 u8 mac_ctrl; 562 u8 mac_ctrl;
587
588 #define VRING_CFG_TO_RESOLUTION_VALUE_POS (0)
589 #define VRING_CFG_TO_RESOLUTION_VALUE_LEN (6)
590 #define VRING_CFG_TO_RESOLUTION_VALUE_MSK (0x3F)
591 u8 to_resolution; 563 u8 to_resolution;
592 u8 agg_max_wsize; 564 u8 agg_max_wsize;
593 struct wmi_vring_cfg_schd schd_params; 565 struct wmi_vring_cfg_schd schd_params;
594} __packed; 566} __packed;
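The CIDXTID_* defines describe how a connection id and a TID share the single cidxtid byte used here and in the ADDBA/DELBA structures further down. As a worked example (illustrative, not part of the patch), packing and unpacking with those masks looks like this:

/* Pack a connection id (low nibble) and TID (high nibble) into cidxtid. */
static inline u8 example_mk_cidxtid(u8 cid, u8 tid)
{
        return ((cid << CIDXTID_CID_POS) & CIDXTID_CID_MSK) |
               ((tid << CIDXTID_TID_POS) & CIDXTID_TID_MSK);
}

/* Split cidxtid back into its two fields. */
static inline void example_parse_cidxtid(u8 cidxtid, u8 *cid, u8 *tid)
{
        *cid = (cidxtid & CIDXTID_CID_MSK) >> CIDXTID_CID_POS;
        *tid = (cidxtid & CIDXTID_TID_MSK) >> CIDXTID_TID_POS;
}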
595 567
596enum wmi_vring_cfg_cmd_action { 568enum wmi_vring_cfg_cmd_action {
597 WMI_VRING_CMD_ADD = 0, 569 WMI_VRING_CMD_ADD = 0x00,
598 WMI_VRING_CMD_MODIFY = 1, 570 WMI_VRING_CMD_MODIFY = 0x01,
599 WMI_VRING_CMD_DELETE = 2, 571 WMI_VRING_CMD_DELETE = 0x02,
600}; 572};
601 573
574/* WMI_VRING_CFG_CMDID */
602struct wmi_vring_cfg_cmd { 575struct wmi_vring_cfg_cmd {
603 __le32 action; 576 __le32 action;
604 struct wmi_vring_cfg vring_cfg; 577 struct wmi_vring_cfg vring_cfg;
605} __packed; 578} __packed;
606 579
607/*
608 * WMI_BCAST_VRING_CFG_CMDID
609 */
610struct wmi_bcast_vring_cfg { 580struct wmi_bcast_vring_cfg {
611 struct wmi_sw_ring_cfg tx_sw_ring; 581 struct wmi_sw_ring_cfg tx_sw_ring;
612 u8 ringid; /* 0-23 vrings */ 582 /* 0-23 vrings */
583 u8 ringid;
613 u8 encap_trans_type; 584 u8 encap_trans_type;
614 u8 ds_cfg; /* 802.3 DS cfg */ 585 /* 802.3 DS cfg */
586 u8 ds_cfg;
615 u8 nwifi_ds_trans_type; 587 u8 nwifi_ds_trans_type;
616} __packed; 588} __packed;
617 589
590/* WMI_BCAST_VRING_CFG_CMDID */
618struct wmi_bcast_vring_cfg_cmd { 591struct wmi_bcast_vring_cfg_cmd {
619 __le32 action; 592 __le32 action;
620 struct wmi_bcast_vring_cfg vring_cfg; 593 struct wmi_bcast_vring_cfg vring_cfg;
621} __packed; 594} __packed;
622 595
623/* 596/* WMI_VRING_BA_EN_CMDID */
624 * WMI_VRING_BA_EN_CMDID
625 */
626struct wmi_vring_ba_en_cmd { 597struct wmi_vring_ba_en_cmd {
627 u8 ringid; 598 u8 ringid;
628 u8 agg_max_wsize; 599 u8 agg_max_wsize;
629 __le16 ba_timeout; 600 __le16 ba_timeout;
630 u8 amsdu; 601 u8 amsdu;
602 u8 reserved[3];
631} __packed; 603} __packed;
632 604
633/* 605/* WMI_VRING_BA_DIS_CMDID */
634 * WMI_VRING_BA_DIS_CMDID
635 */
636struct wmi_vring_ba_dis_cmd { 606struct wmi_vring_ba_dis_cmd {
637 u8 ringid; 607 u8 ringid;
638 u8 reserved; 608 u8 reserved;
639 __le16 reason; 609 __le16 reason;
640} __packed; 610} __packed;
641 611
642/* 612/* WMI_NOTIFY_REQ_CMDID */
643 * WMI_NOTIFY_REQ_CMDID
644 */
645struct wmi_notify_req_cmd { 613struct wmi_notify_req_cmd {
646 u8 cid; 614 u8 cid;
647 u8 year; 615 u8 year;
@@ -654,102 +622,100 @@ struct wmi_notify_req_cmd {
654 u8 miliseconds; 622 u8 miliseconds;
655} __packed; 623} __packed;
656 624
657/* 625/* WMI_CFG_RX_CHAIN_CMDID */
658 * WMI_CFG_RX_CHAIN_CMDID
659 */
660enum wmi_sniffer_cfg_mode { 626enum wmi_sniffer_cfg_mode {
661 WMI_SNIFFER_OFF = 0, 627 WMI_SNIFFER_OFF = 0x00,
662 WMI_SNIFFER_ON = 1, 628 WMI_SNIFFER_ON = 0x01,
663}; 629};
664 630
665enum wmi_sniffer_cfg_phy_info_mode { 631enum wmi_sniffer_cfg_phy_info_mode {
666 WMI_SNIFFER_PHY_INFO_DISABLED = 0, 632 WMI_SNIFFER_PHY_INFO_DISABLED = 0x00,
667 WMI_SNIFFER_PHY_INFO_ENABLED = 1, 633 WMI_SNIFFER_PHY_INFO_ENABLED = 0x01,
668}; 634};
669 635
670enum wmi_sniffer_cfg_phy_support { 636enum wmi_sniffer_cfg_phy_support {
671 WMI_SNIFFER_CP = 0, 637 WMI_SNIFFER_CP = 0x00,
672 WMI_SNIFFER_DP = 1, 638 WMI_SNIFFER_DP = 0x01,
673 WMI_SNIFFER_BOTH_PHYS = 2, 639 WMI_SNIFFER_BOTH_PHYS = 0x02,
674}; 640};
675 641
642/* wmi_sniffer_cfg */
676struct wmi_sniffer_cfg { 643struct wmi_sniffer_cfg {
677 __le32 mode; /* enum wmi_sniffer_cfg_mode */ 644 /* enum wmi_sniffer_cfg_mode */
678 __le32 phy_info_mode; /* enum wmi_sniffer_cfg_phy_info_mode */ 645 __le32 mode;
679 __le32 phy_support; /* enum wmi_sniffer_cfg_phy_support */ 646 /* enum wmi_sniffer_cfg_phy_info_mode */
647 __le32 phy_info_mode;
648 /* enum wmi_sniffer_cfg_phy_support */
649 __le32 phy_support;
680 u8 channel; 650 u8 channel;
681 u8 reserved[3]; 651 u8 reserved[3];
682} __packed; 652} __packed;
683 653
684enum wmi_cfg_rx_chain_cmd_action { 654enum wmi_cfg_rx_chain_cmd_action {
685 WMI_RX_CHAIN_ADD = 0, 655 WMI_RX_CHAIN_ADD = 0x00,
686 WMI_RX_CHAIN_DEL = 1, 656 WMI_RX_CHAIN_DEL = 0x01,
687}; 657};
688 658
689enum wmi_cfg_rx_chain_cmd_decap_trans_type { 659enum wmi_cfg_rx_chain_cmd_decap_trans_type {
690 WMI_DECAP_TYPE_802_3 = 0, 660 WMI_DECAP_TYPE_802_3 = 0x00,
691 WMI_DECAP_TYPE_NATIVE_WIFI = 1, 661 WMI_DECAP_TYPE_NATIVE_WIFI = 0x01,
692 WMI_DECAP_TYPE_NONE = 2, 662 WMI_DECAP_TYPE_NONE = 0x02,
693}; 663};
694 664
695enum wmi_cfg_rx_chain_cmd_nwifi_ds_trans_type { 665enum wmi_cfg_rx_chain_cmd_nwifi_ds_trans_type {
696 WMI_NWIFI_RX_TRANS_MODE_NO = 0, 666 WMI_NWIFI_RX_TRANS_MODE_NO = 0x00,
697 WMI_NWIFI_RX_TRANS_MODE_PBSS2AP = 1, 667 WMI_NWIFI_RX_TRANS_MODE_PBSS2AP = 0x01,
698 WMI_NWIFI_RX_TRANS_MODE_PBSS2STA = 2, 668 WMI_NWIFI_RX_TRANS_MODE_PBSS2STA = 0x02,
699}; 669};
700 670
701enum wmi_cfg_rx_chain_cmd_reorder_type { 671enum wmi_cfg_rx_chain_cmd_reorder_type {
702 WMI_RX_HW_REORDER = 0, 672 WMI_RX_HW_REORDER = 0x00,
703 WMI_RX_SW_REORDER = 1, 673 WMI_RX_SW_REORDER = 0x01,
704}; 674};
705 675
676#define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_POS (0)
677#define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_LEN (1)
678#define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_MSK (0x1)
679#define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_POS (1)
680#define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_LEN (1)
681#define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_MSK (0x2)
682#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_POS (0)
683#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_LEN (1)
684#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_MSK (0x1)
685#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_POS (1)
686#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_LEN (1)
687#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_MSK (0x2)
688#define L3_L4_CTRL_IPV4_CHECKSUM_EN_POS (0)
689#define L3_L4_CTRL_IPV4_CHECKSUM_EN_LEN (1)
690#define L3_L4_CTRL_IPV4_CHECKSUM_EN_MSK (0x1)
691#define L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS (1)
692#define L3_L4_CTRL_TCPIP_CHECKSUM_EN_LEN (1)
693#define L3_L4_CTRL_TCPIP_CHECKSUM_EN_MSK (0x2)
694#define RING_CTRL_OVERRIDE_PREFETCH_THRSH_POS (0)
695#define RING_CTRL_OVERRIDE_PREFETCH_THRSH_LEN (1)
696#define RING_CTRL_OVERRIDE_PREFETCH_THRSH_MSK (0x1)
697#define RING_CTRL_OVERRIDE_WB_THRSH_POS (1)
698#define RING_CTRL_OVERRIDE_WB_THRSH_LEN (1)
699#define RING_CTRL_OVERRIDE_WB_THRSH_MSK (0x2)
700#define RING_CTRL_OVERRIDE_ITR_THRSH_POS (2)
701#define RING_CTRL_OVERRIDE_ITR_THRSH_LEN (1)
702#define RING_CTRL_OVERRIDE_ITR_THRSH_MSK (0x4)
703#define RING_CTRL_OVERRIDE_HOST_THRSH_POS (3)
704#define RING_CTRL_OVERRIDE_HOST_THRSH_LEN (1)
705#define RING_CTRL_OVERRIDE_HOST_THRSH_MSK (0x8)
706
707/* WMI_CFG_RX_CHAIN_CMDID */
706struct wmi_cfg_rx_chain_cmd { 708struct wmi_cfg_rx_chain_cmd {
707 __le32 action; 709 __le32 action;
708 struct wmi_sw_ring_cfg rx_sw_ring; 710 struct wmi_sw_ring_cfg rx_sw_ring;
709 u8 mid; 711 u8 mid;
710 u8 decap_trans_type; 712 u8 decap_trans_type;
711
712 #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_POS (0)
713 #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_LEN (1)
714 #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_MSK (0x1)
715 #define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_POS (1)
716 #define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_LEN (1)
717 #define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_MSK (0x2)
718 u8 l2_802_3_offload_ctrl; 713 u8 l2_802_3_offload_ctrl;
719
720 #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_POS (0)
721 #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_LEN (1)
722 #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_MSK (0x1)
723 #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_POS (1)
724 #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_LEN (1)
725 #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_MSK (0x2)
726 u8 l2_nwifi_offload_ctrl; 714 u8 l2_nwifi_offload_ctrl;
727
728 u8 vlan_id; 715 u8 vlan_id;
729 u8 nwifi_ds_trans_type; 716 u8 nwifi_ds_trans_type;
730
731 #define L3_L4_CTRL_IPV4_CHECKSUM_EN_POS (0)
732 #define L3_L4_CTRL_IPV4_CHECKSUM_EN_LEN (1)
733 #define L3_L4_CTRL_IPV4_CHECKSUM_EN_MSK (0x1)
734 #define L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS (1)
735 #define L3_L4_CTRL_TCPIP_CHECKSUM_EN_LEN (1)
736 #define L3_L4_CTRL_TCPIP_CHECKSUM_EN_MSK (0x2)
737 u8 l3_l4_ctrl; 717 u8 l3_l4_ctrl;
738
739 #define RING_CTRL_OVERRIDE_PREFETCH_THRSH_POS (0)
740 #define RING_CTRL_OVERRIDE_PREFETCH_THRSH_LEN (1)
741 #define RING_CTRL_OVERRIDE_PREFETCH_THRSH_MSK (0x1)
742 #define RING_CTRL_OVERRIDE_WB_THRSH_POS (1)
743 #define RING_CTRL_OVERRIDE_WB_THRSH_LEN (1)
744 #define RING_CTRL_OVERRIDE_WB_THRSH_MSK (0x2)
745 #define RING_CTRL_OVERRIDE_ITR_THRSH_POS (2)
746 #define RING_CTRL_OVERRIDE_ITR_THRSH_LEN (1)
747 #define RING_CTRL_OVERRIDE_ITR_THRSH_MSK (0x4)
748 #define RING_CTRL_OVERRIDE_HOST_THRSH_POS (3)
749 #define RING_CTRL_OVERRIDE_HOST_THRSH_LEN (1)
750 #define RING_CTRL_OVERRIDE_HOST_THRSH_MSK (0x8)
751 u8 ring_ctrl; 718 u8 ring_ctrl;
752
753 __le16 prefetch_thrsh; 719 __le16 prefetch_thrsh;
754 __le16 wb_thrsh; 720 __le16 wb_thrsh;
755 __le32 itr_value; 721 __le32 itr_value;
@@ -757,31 +723,27 @@ struct wmi_cfg_rx_chain_cmd {
757 u8 reorder_type; 723 u8 reorder_type;
758 u8 reserved; 724 u8 reserved;
759 struct wmi_sniffer_cfg sniffer_cfg; 725 struct wmi_sniffer_cfg sniffer_cfg;
726 __le16 max_rx_pl_per_desc;
760} __packed; 727} __packed;
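The *_POS/_LEN/_MSK defines hoisted out of the structure body above are ordinary bitfield descriptors for the l2/l3/ring control bytes; the RING_CTRL_OVERRIDE_* bits appear to select which of the threshold fields below override firmware defaults. A sketch of filling the command is below; it is illustrative only, the override semantics are inferred from the names, and kernel context is assumed.

/* Illustrative sketch: add an RX chain with HW reorder, IPv4/TCP checksum
 * offload and an explicit interrupt-moderation value.
 */
static void example_fill_rx_chain(struct wmi_cfg_rx_chain_cmd *cmd,
                                  u64 ring_pa, u16 ring_size, u32 itr_usec)
{
        memset(cmd, 0, sizeof(*cmd));
        cmd->action = cpu_to_le32(WMI_RX_CHAIN_ADD);
        cmd->rx_sw_ring.ring_mem_base = cpu_to_le64(ring_pa);
        cmd->rx_sw_ring.ring_size = cpu_to_le16(ring_size);
        cmd->decap_trans_type = WMI_DECAP_TYPE_802_3;
        cmd->l3_l4_ctrl = L3_L4_CTRL_IPV4_CHECKSUM_EN_MSK |
                          L3_L4_CTRL_TCPIP_CHECKSUM_EN_MSK;
        cmd->ring_ctrl = RING_CTRL_OVERRIDE_ITR_THRSH_MSK;  /* use itr_value */
        cmd->itr_value = cpu_to_le32(itr_usec);
        cmd->reorder_type = WMI_RX_HW_REORDER;
}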
761 728
762/* 729/* WMI_RCP_ADDBA_RESP_CMDID */
763 * WMI_RCP_ADDBA_RESP_CMDID
764 */
765struct wmi_rcp_addba_resp_cmd { 730struct wmi_rcp_addba_resp_cmd {
766 u8 cidxtid; 731 u8 cidxtid;
767 u8 dialog_token; 732 u8 dialog_token;
768 __le16 status_code; 733 __le16 status_code;
769 __le16 ba_param_set; /* ieee80211_ba_parameterset field to send */ 734 /* ieee80211_ba_parameterset field to send */
735 __le16 ba_param_set;
770 __le16 ba_timeout; 736 __le16 ba_timeout;
771} __packed; 737} __packed;
772 738
773/* 739/* WMI_RCP_DELBA_CMDID */
774 * WMI_RCP_DELBA_CMDID
775 */
776struct wmi_rcp_delba_cmd { 740struct wmi_rcp_delba_cmd {
777 u8 cidxtid; 741 u8 cidxtid;
778 u8 reserved; 742 u8 reserved;
779 __le16 reason; 743 __le16 reason;
780} __packed; 744} __packed;
781 745
782/* 746/* WMI_RCP_ADDBA_REQ_CMDID */
783 * WMI_RCP_ADDBA_REQ_CMDID
784 */
785struct wmi_rcp_addba_req_cmd { 747struct wmi_rcp_addba_req_cmd {
786 u8 cidxtid; 748 u8 cidxtid;
787 u8 dialog_token; 749 u8 dialog_token;
@@ -792,32 +754,16 @@ struct wmi_rcp_addba_req_cmd {
792 __le16 ba_seq_ctrl; 754 __le16 ba_seq_ctrl;
793} __packed; 755} __packed;
794 756
795/* 757/* WMI_SET_MAC_ADDRESS_CMDID */
796 * WMI_SET_MAC_ADDRESS_CMDID
797 */
798struct wmi_set_mac_address_cmd { 758struct wmi_set_mac_address_cmd {
799 u8 mac[WMI_MAC_LEN]; 759 u8 mac[WMI_MAC_LEN];
800 u8 reserved[2]; 760 u8 reserved[2];
801} __packed; 761} __packed;
802 762
803/* 763/* WMI_ECHO_CMDID
804* WMI_EAPOL_TX_CMDID
805*/
806struct wmi_eapol_tx_cmd {
807 u8 dst_mac[WMI_MAC_LEN];
808 __le16 eapol_len;
809 u8 eapol[0];
810} __packed;
811
812/*
813 * WMI_ECHO_CMDID
814 *
815 * Check FW is alive 764 * Check FW is alive
816 *
817 * WMI_DEEP_ECHO_CMDID 765 * WMI_DEEP_ECHO_CMDID
818 *
819 * Check FW and ucode are alive 766 * Check FW and ucode are alive
820 *
821 * Returned event: WMI_ECHO_RSP_EVENTID 767 * Returned event: WMI_ECHO_RSP_EVENTID
822 * same event for both commands 768 * same event for both commands
823 */ 769 */
@@ -825,70 +771,79 @@ struct wmi_echo_cmd {
825 __le32 value; 771 __le32 value;
826} __packed; 772} __packed;
827 773
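A simple liveness check can be built from the echo command and its response event: send a cookie and compare it with the echoed value. The sketch below is illustrative; wmi_call() (send a command and wait for a specific reply event) is a hypothetical helper, and kernel context is assumed.

/* Illustrative keep-alive using WMI_ECHO_CMDID / WMI_ECHO_RSP_EVENTID. */
static int example_fw_alive(void *wmi_handle)
{
        struct wmi_echo_cmd cmd = { .value = cpu_to_le32(0x12345678) };
        struct wmi_echo_rsp_event evt = {};
        int rc;

        rc = wmi_call(wmi_handle, WMI_ECHO_CMDID, &cmd, sizeof(cmd),
                      WMI_ECHO_RSP_EVENTID, &evt, sizeof(evt));  /* hypothetical */
        if (rc)
                return rc;

        return le32_to_cpu(evt.echoed_value) == 0x12345678 ? 0 : -EIO;
}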
828/* 774/* WMI_OTP_READ_CMDID */
829 * WMI_TEMP_SENSE_CMDID 775struct wmi_otp_read_cmd {
776 __le32 addr;
777 __le32 size;
778 __le32 values;
779} __packed;
780
781/* WMI_OTP_WRITE_CMDID */
782struct wmi_otp_write_cmd {
783 __le32 addr;
784 __le32 size;
785 __le32 values;
786} __packed;
787
788/* WMI_TEMP_SENSE_CMDID
830 * 789 *
831 * Measure MAC and radio temperatures 790 * Measure MAC and radio temperatures
791 *
792 * Possible modes for temperature measurement
832 */ 793 */
833
834/* Possible modes for temperature measurement */
835enum wmi_temperature_measure_mode { 794enum wmi_temperature_measure_mode {
836 TEMPERATURE_USE_OLD_VALUE = 0x1, 795 TEMPERATURE_USE_OLD_VALUE = 0x01,
837 TEMPERATURE_MEASURE_NOW = 0x2, 796 TEMPERATURE_MEASURE_NOW = 0x02,
838}; 797};
839 798
799/* WMI_TEMP_SENSE_CMDID */
840struct wmi_temp_sense_cmd { 800struct wmi_temp_sense_cmd {
841 __le32 measure_baseband_en; 801 __le32 measure_baseband_en;
842 __le32 measure_rf_en; 802 __le32 measure_rf_en;
843 __le32 measure_mode; 803 __le32 measure_mode;
844} __packed; 804} __packed;
845 805
846/* 806enum wmi_pmc_op {
847 * WMI_PMC_CMDID 807 WMI_PMC_ALLOCATE = 0x00,
848 */ 808 WMI_PMC_RELEASE = 0x01,
849enum wmi_pmc_op_e {
850 WMI_PMC_ALLOCATE = 0,
851 WMI_PMC_RELEASE = 1,
852}; 809};
853 810
811/* WMI_PMC_CMDID */
854struct wmi_pmc_cmd { 812struct wmi_pmc_cmd {
855 u8 op; /* enum wmi_pmc_cmd_op_type */ 813 /* enum wmi_pmc_cmd_op_type */
814 u8 op;
856 u8 reserved; 815 u8 reserved;
857 __le16 ring_size; 816 __le16 ring_size;
858 __le64 mem_base; 817 __le64 mem_base;
859} __packed; 818} __packed;
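The PMC command hands the firmware a host-resident descriptor ring (physical base address plus ring size) and later releases it again; the header does not describe what the firmware writes there. An illustrative pair of calls, with wmi_send() as a hypothetical helper and kernel context assumed:

/* Illustrative sketch: allocate and release a PMC ring for the firmware. */
static int example_pmc_alloc(void *wmi_handle, u64 ring_pa, u16 num_descriptors)
{
        struct wmi_pmc_cmd cmd = {
                .op = WMI_PMC_ALLOCATE,              /* enum wmi_pmc_op */
                .ring_size = cpu_to_le16(num_descriptors),
                .mem_base = cpu_to_le64(ring_pa),
        };

        return wmi_send(wmi_handle, WMI_PMC_CMDID, &cmd, sizeof(cmd));
}

static int example_pmc_release(void *wmi_handle)
{
        struct wmi_pmc_cmd cmd = { .op = WMI_PMC_RELEASE };

        return wmi_send(wmi_handle, WMI_PMC_CMDID, &cmd, sizeof(cmd));
}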
860 819
861/* 820/* WMI Events
862 * WMI Events
863 */
864
865/*
866 * List of Events (target to host) 821 * List of Events (target to host)
867 */ 822 */
868enum wmi_event_id { 823enum wmi_event_id {
869 WMI_READY_EVENTID = 0x1001, 824 WMI_READY_EVENTID = 0x1001,
870 WMI_CONNECT_EVENTID = 0x1002, 825 WMI_CONNECT_EVENTID = 0x1002,
871 WMI_DISCONNECT_EVENTID = 0x1003, 826 WMI_DISCONNECT_EVENTID = 0x1003,
872 WMI_SCAN_COMPLETE_EVENTID = 0x100a, 827 WMI_SCAN_COMPLETE_EVENTID = 0x100A,
873 WMI_REPORT_STATISTICS_EVENTID = 0x100b, 828 WMI_REPORT_STATISTICS_EVENTID = 0x100B,
874 WMI_RD_MEM_RSP_EVENTID = 0x1800, 829 WMI_RD_MEM_RSP_EVENTID = 0x1800,
875 WMI_FW_READY_EVENTID = 0x1801, 830 WMI_FW_READY_EVENTID = 0x1801,
876 WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID = 0x0200, 831 WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID = 0x200,
877 WMI_ECHO_RSP_EVENTID = 0x1803, 832 WMI_ECHO_RSP_EVENTID = 0x1803,
878 WMI_FS_TUNE_DONE_EVENTID = 0x180a, 833 WMI_FS_TUNE_DONE_EVENTID = 0x180A,
879 WMI_CORR_MEASURE_EVENTID = 0x180b, 834 WMI_CORR_MEASURE_EVENTID = 0x180B,
880 WMI_READ_RSSI_EVENTID = 0x180c, 835 WMI_READ_RSSI_EVENTID = 0x180C,
881 WMI_TEMP_SENSE_DONE_EVENTID = 0x180e, 836 WMI_TEMP_SENSE_DONE_EVENTID = 0x180E,
882 WMI_DC_CALIB_DONE_EVENTID = 0x180f, 837 WMI_DC_CALIB_DONE_EVENTID = 0x180F,
883 WMI_IQ_TX_CALIB_DONE_EVENTID = 0x1811, 838 WMI_IQ_TX_CALIB_DONE_EVENTID = 0x1811,
884 WMI_IQ_RX_CALIB_DONE_EVENTID = 0x1812, 839 WMI_IQ_RX_CALIB_DONE_EVENTID = 0x1812,
885 WMI_SET_WORK_MODE_DONE_EVENTID = 0x1815, 840 WMI_SET_WORK_MODE_DONE_EVENTID = 0x1815,
886 WMI_LO_LEAKAGE_CALIB_DONE_EVENTID = 0x1816, 841 WMI_LO_LEAKAGE_CALIB_DONE_EVENTID = 0x1816,
887 WMI_MARLON_R_READ_DONE_EVENTID = 0x1818, 842 WMI_MARLON_R_READ_DONE_EVENTID = 0x1818,
888 WMI_MARLON_R_WRITE_DONE_EVENTID = 0x1819, 843 WMI_MARLON_R_WRITE_DONE_EVENTID = 0x1819,
889 WMI_MARLON_R_TXRX_SEL_DONE_EVENTID = 0x181a, 844 WMI_MARLON_R_TXRX_SEL_DONE_EVENTID = 0x181A,
890 WMI_SILENT_RSSI_CALIB_DONE_EVENTID = 0x181d, 845 WMI_SILENT_RSSI_CALIB_DONE_EVENTID = 0x181D,
891 WMI_RF_RX_TEST_DONE_EVENTID = 0x181e, 846 WMI_RF_RX_TEST_DONE_EVENTID = 0x181E,
892 WMI_CFG_RX_CHAIN_DONE_EVENTID = 0x1820, 847 WMI_CFG_RX_CHAIN_DONE_EVENTID = 0x1820,
893 WMI_VRING_CFG_DONE_EVENTID = 0x1821, 848 WMI_VRING_CFG_DONE_EVENTID = 0x1821,
894 WMI_BA_STATUS_EVENTID = 0x1823, 849 WMI_BA_STATUS_EVENTID = 0x1823,
@@ -896,15 +851,13 @@ enum wmi_event_id {
896 WMI_RCP_ADDBA_RESP_SENT_EVENTID = 0x1825, 851 WMI_RCP_ADDBA_RESP_SENT_EVENTID = 0x1825,
897 WMI_DELBA_EVENTID = 0x1826, 852 WMI_DELBA_EVENTID = 0x1826,
898 WMI_GET_SSID_EVENTID = 0x1828, 853 WMI_GET_SSID_EVENTID = 0x1828,
899 WMI_GET_PCP_CHANNEL_EVENTID = 0x182a, 854 WMI_GET_PCP_CHANNEL_EVENTID = 0x182A,
900 WMI_SW_TX_COMPLETE_EVENTID = 0x182b, 855 WMI_SW_TX_COMPLETE_EVENTID = 0x182B,
901
902 WMI_READ_MAC_RXQ_EVENTID = 0x1830, 856 WMI_READ_MAC_RXQ_EVENTID = 0x1830,
903 WMI_READ_MAC_TXQ_EVENTID = 0x1831, 857 WMI_READ_MAC_TXQ_EVENTID = 0x1831,
904 WMI_WRITE_MAC_RXQ_EVENTID = 0x1832, 858 WMI_WRITE_MAC_RXQ_EVENTID = 0x1832,
905 WMI_WRITE_MAC_TXQ_EVENTID = 0x1833, 859 WMI_WRITE_MAC_TXQ_EVENTID = 0x1833,
906 WMI_WRITE_MAC_XQ_FIELD_EVENTID = 0x1834, 860 WMI_WRITE_MAC_XQ_FIELD_EVENTID = 0x1834,
907
908 WMI_BEAMFORMING_MGMT_DONE_EVENTID = 0x1836, 861 WMI_BEAMFORMING_MGMT_DONE_EVENTID = 0x1836,
909 WMI_BF_TXSS_MGMT_DONE_EVENTID = 0x1837, 862 WMI_BF_TXSS_MGMT_DONE_EVENTID = 0x1837,
910 WMI_BF_RXSS_MGMT_DONE_EVENTID = 0x1839, 863 WMI_BF_RXSS_MGMT_DONE_EVENTID = 0x1839,
@@ -914,20 +867,18 @@ enum wmi_event_id {
914 WMI_BF_SM_MGMT_DONE_EVENTID = 0x1838, 867 WMI_BF_SM_MGMT_DONE_EVENTID = 0x1838,
915 WMI_RX_MGMT_PACKET_EVENTID = 0x1840, 868 WMI_RX_MGMT_PACKET_EVENTID = 0x1840,
916 WMI_TX_MGMT_PACKET_EVENTID = 0x1841, 869 WMI_TX_MGMT_PACKET_EVENTID = 0x1841,
917 870 WMI_OTP_READ_RESULT_EVENTID = 0x1856,
918 /* Performance monitoring events */ 871 /* Performance monitoring events */
919 WMI_DATA_PORT_OPEN_EVENTID = 0x1860, 872 WMI_DATA_PORT_OPEN_EVENTID = 0x1860,
920 WMI_WBE_LINK_DOWN_EVENTID = 0x1861, 873 WMI_WBE_LINK_DOWN_EVENTID = 0x1861,
921
922 WMI_BF_CTRL_DONE_EVENTID = 0x1862, 874 WMI_BF_CTRL_DONE_EVENTID = 0x1862,
923 WMI_NOTIFY_REQ_DONE_EVENTID = 0x1863, 875 WMI_NOTIFY_REQ_DONE_EVENTID = 0x1863,
924 WMI_GET_STATUS_DONE_EVENTID = 0x1864, 876 WMI_GET_STATUS_DONE_EVENTID = 0x1864,
925 WMI_VRING_EN_EVENTID = 0x1865, 877 WMI_VRING_EN_EVENTID = 0x1865,
926
927 WMI_UNIT_TEST_EVENTID = 0x1900, 878 WMI_UNIT_TEST_EVENTID = 0x1900,
928 WMI_FLASH_READ_DONE_EVENTID = 0x1902, 879 WMI_FLASH_READ_DONE_EVENTID = 0x1902,
929 WMI_FLASH_WRITE_DONE_EVENTID = 0x1903, 880 WMI_FLASH_WRITE_DONE_EVENTID = 0x1903,
930 /*P2P*/ 881 /* P2P */
931 WMI_P2P_CFG_DONE_EVENTID = 0x1910, 882 WMI_P2P_CFG_DONE_EVENTID = 0x1910,
932 WMI_PORT_ALLOCATED_EVENTID = 0x1911, 883 WMI_PORT_ALLOCATED_EVENTID = 0x1911,
933 WMI_PORT_DELETED_EVENTID = 0x1912, 884 WMI_PORT_DELETED_EVENTID = 0x1912,
@@ -937,49 +888,42 @@ enum wmi_event_id {
937 WMI_DISCOVERY_STOPPED_EVENTID = 0x1917, 888 WMI_DISCOVERY_STOPPED_EVENTID = 0x1917,
938 WMI_PCP_STARTED_EVENTID = 0x1918, 889 WMI_PCP_STARTED_EVENTID = 0x1918,
939 WMI_PCP_STOPPED_EVENTID = 0x1919, 890 WMI_PCP_STOPPED_EVENTID = 0x1919,
940 WMI_PCP_FACTOR_EVENTID = 0x191a, 891 WMI_PCP_FACTOR_EVENTID = 0x191A,
941 WMI_SET_CHANNEL_EVENTID = 0x9000, 892 WMI_SET_CHANNEL_EVENTID = 0x9000,
942 WMI_ASSOC_REQ_EVENTID = 0x9001, 893 WMI_ASSOC_REQ_EVENTID = 0x9001,
943 WMI_EAPOL_RX_EVENTID = 0x9002, 894 WMI_EAPOL_RX_EVENTID = 0x9002,
944 WMI_MAC_ADDR_RESP_EVENTID = 0x9003, 895 WMI_MAC_ADDR_RESP_EVENTID = 0x9003,
945 WMI_FW_VER_EVENTID = 0x9004, 896 WMI_FW_VER_EVENTID = 0x9004,
897 WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENTID = 0x9005,
946}; 898};
947 899
948/* 900/* Events data structures */
949 * Events data structures
950 */
951
952enum wmi_fw_status { 901enum wmi_fw_status {
953 WMI_FW_STATUS_SUCCESS, 902 WMI_FW_STATUS_SUCCESS = 0x00,
954 WMI_FW_STATUS_FAILURE, 903 WMI_FW_STATUS_FAILURE = 0x01,
955}; 904};
956 905
957/* 906/* WMI_RF_MGMT_STATUS_EVENTID */
958 * WMI_RF_MGMT_STATUS_EVENTID
959 */
960enum wmi_rf_status { 907enum wmi_rf_status {
961 WMI_RF_ENABLED = 0, 908 WMI_RF_ENABLED = 0x00,
962 WMI_RF_DISABLED_HW = 1, 909 WMI_RF_DISABLED_HW = 0x01,
963 WMI_RF_DISABLED_SW = 2, 910 WMI_RF_DISABLED_SW = 0x02,
964 WMI_RF_DISABLED_HW_SW = 3, 911 WMI_RF_DISABLED_HW_SW = 0x03,
965}; 912};
966 913
914/* WMI_RF_MGMT_STATUS_EVENTID */
967struct wmi_rf_mgmt_status_event { 915struct wmi_rf_mgmt_status_event {
968 __le32 rf_status; 916 __le32 rf_status;
969} __packed; 917} __packed;
970 918
971/* 919/* WMI_THERMAL_THROTTLING_STATUS_EVENTID */
972 * WMI_THERMAL_THROTTLING_STATUS_EVENTID
973 */
974struct wmi_thermal_throttling_status_event { 920struct wmi_thermal_throttling_status_event {
975 __le32 time_on_usec; 921 __le32 time_on_usec;
976 __le32 time_off_usec; 922 __le32 time_off_usec;
977 __le32 max_txop_length_usec; 923 __le32 max_txop_length_usec;
978} __packed; 924} __packed;
979 925
980/* 926/* WMI_GET_STATUS_DONE_EVENTID */
981 * WMI_GET_STATUS_DONE_EVENTID
982 */
983struct wmi_get_status_done_event { 927struct wmi_get_status_done_event {
984 __le32 is_associated; 928 __le32 is_associated;
985 u8 cid; 929 u8 cid;
@@ -995,9 +939,7 @@ struct wmi_get_status_done_event {
995 __le32 is_secured; 939 __le32 is_secured;
996} __packed; 940} __packed;
997 941
998/* 942/* WMI_FW_VER_EVENTID */
999 * WMI_FW_VER_EVENTID
1000 */
1001struct wmi_fw_ver_event { 943struct wmi_fw_ver_event {
1002 u8 major; 944 u8 major;
1003 u8 minor; 945 u8 minor;
@@ -1005,9 +947,7 @@ struct wmi_fw_ver_event {
1005 __le16 build; 947 __le16 build;
1006} __packed; 948} __packed;
1007 949
1008/* 950/* WMI_MAC_ADDR_RESP_EVENTID */
1009* WMI_MAC_ADDR_RESP_EVENTID
1010*/
1011struct wmi_mac_addr_resp_event { 951struct wmi_mac_addr_resp_event {
1012 u8 mac[WMI_MAC_LEN]; 952 u8 mac[WMI_MAC_LEN];
1013 u8 auth_mode; 953 u8 auth_mode;
@@ -1015,42 +955,38 @@ struct wmi_mac_addr_resp_event {
1015 __le32 offload_mode; 955 __le32 offload_mode;
1016} __packed; 956} __packed;
1017 957
1018/* 958/* WMI_EAPOL_RX_EVENTID */
1019* WMI_EAPOL_RX_EVENTID
1020*/
1021struct wmi_eapol_rx_event { 959struct wmi_eapol_rx_event {
1022 u8 src_mac[WMI_MAC_LEN]; 960 u8 src_mac[WMI_MAC_LEN];
1023 __le16 eapol_len; 961 __le16 eapol_len;
1024 u8 eapol[0]; 962 u8 eapol[0];
1025} __packed; 963} __packed;
1026 964
1027/* 965/* WMI_READY_EVENTID */
1028* WMI_READY_EVENTID
1029*/
1030enum wmi_phy_capability { 966enum wmi_phy_capability {
1031 WMI_11A_CAPABILITY = 1, 967 WMI_11A_CAPABILITY = 0x01,
1032 WMI_11G_CAPABILITY = 2, 968 WMI_11G_CAPABILITY = 0x02,
1033 WMI_11AG_CAPABILITY = 3, 969 WMI_11AG_CAPABILITY = 0x03,
1034 WMI_11NA_CAPABILITY = 4, 970 WMI_11NA_CAPABILITY = 0x04,
1035 WMI_11NG_CAPABILITY = 5, 971 WMI_11NG_CAPABILITY = 0x05,
1036 WMI_11NAG_CAPABILITY = 6, 972 WMI_11NAG_CAPABILITY = 0x06,
1037 WMI_11AD_CAPABILITY = 7, 973 WMI_11AD_CAPABILITY = 0x07,
1038 WMI_11N_CAPABILITY_OFFSET = WMI_11NA_CAPABILITY - WMI_11A_CAPABILITY, 974 WMI_11N_CAPABILITY_OFFSET = 0x03,
1039}; 975};
1040 976
1041struct wmi_ready_event { 977struct wmi_ready_event {
1042 __le32 sw_version; 978 __le32 sw_version;
1043 __le32 abi_version; 979 __le32 abi_version;
1044 u8 mac[WMI_MAC_LEN]; 980 u8 mac[WMI_MAC_LEN];
1045 u8 phy_capability; /* enum wmi_phy_capability */ 981 /* enum wmi_phy_capability */
982 u8 phy_capability;
1046 u8 numof_additional_mids; 983 u8 numof_additional_mids;
1047} __packed; 984} __packed;
1048 985
1049/* 986/* WMI_NOTIFY_REQ_DONE_EVENTID */
1050 * WMI_NOTIFY_REQ_DONE_EVENTID
1051 */
1052struct wmi_notify_req_done_event { 987struct wmi_notify_req_done_event {
1053 __le32 status; /* beamforming status, 0: fail; 1: OK; 2: retrying */ 988 /* beamforming status, 0: fail; 1: OK; 2: retrying */
989 __le32 status;
1054 __le64 tsf; 990 __le64 tsf;
1055 __le32 snr_val; 991 __le32 snr_val;
1056 __le32 tx_tpt; 992 __le32 tx_tpt;
@@ -1066,9 +1002,7 @@ struct wmi_notify_req_done_event {
1066 u8 reserved[3]; 1002 u8 reserved[3];
1067} __packed; 1003} __packed;
1068 1004
1069/* 1005/* WMI_CONNECT_EVENTID */
1070 * WMI_CONNECT_EVENTID
1071 */
1072struct wmi_connect_event { 1006struct wmi_connect_event {
1073 u8 channel; 1007 u8 channel;
1074 u8 reserved0; 1008 u8 reserved0;
@@ -1082,68 +1016,103 @@ struct wmi_connect_event {
1082 u8 assoc_resp_len; 1016 u8 assoc_resp_len;
1083 u8 cid; 1017 u8 cid;
1084 u8 reserved2[3]; 1018 u8 reserved2[3];
1019 /* not in use */
1085 u8 assoc_info[0]; 1020 u8 assoc_info[0];
1086} __packed; 1021} __packed;
1087 1022
1088/* 1023/* WMI_DISCONNECT_EVENTID */
1089 * WMI_DISCONNECT_EVENTID
1090 */
1091enum wmi_disconnect_reason { 1024enum wmi_disconnect_reason {
1092 WMI_DIS_REASON_NO_NETWORK_AVAIL = 1, 1025 WMI_DIS_REASON_NO_NETWORK_AVAIL = 0x01,
1093 WMI_DIS_REASON_LOST_LINK = 2, /* bmiss */ 1026 /* bmiss */
1094 WMI_DIS_REASON_DISCONNECT_CMD = 3, 1027 WMI_DIS_REASON_LOST_LINK = 0x02,
1095 WMI_DIS_REASON_BSS_DISCONNECTED = 4, 1028 WMI_DIS_REASON_DISCONNECT_CMD = 0x03,
1096 WMI_DIS_REASON_AUTH_FAILED = 5, 1029 WMI_DIS_REASON_BSS_DISCONNECTED = 0x04,
1097 WMI_DIS_REASON_ASSOC_FAILED = 6, 1030 WMI_DIS_REASON_AUTH_FAILED = 0x05,
1098 WMI_DIS_REASON_NO_RESOURCES_AVAIL = 7, 1031 WMI_DIS_REASON_ASSOC_FAILED = 0x06,
1099 WMI_DIS_REASON_CSERV_DISCONNECT = 8, 1032 WMI_DIS_REASON_NO_RESOURCES_AVAIL = 0x07,
1100 WMI_DIS_REASON_INVALID_PROFILE = 10, 1033 WMI_DIS_REASON_CSERV_DISCONNECT = 0x08,
1101 WMI_DIS_REASON_DOT11H_CHANNEL_SWITCH = 11, 1034 WMI_DIS_REASON_INVALID_PROFILE = 0x0A,
1102 WMI_DIS_REASON_PROFILE_MISMATCH = 12, 1035 WMI_DIS_REASON_DOT11H_CHANNEL_SWITCH = 0x0B,
1103 WMI_DIS_REASON_CONNECTION_EVICTED = 13, 1036 WMI_DIS_REASON_PROFILE_MISMATCH = 0x0C,
1104 WMI_DIS_REASON_IBSS_MERGE = 14, 1037 WMI_DIS_REASON_CONNECTION_EVICTED = 0x0D,
1038 WMI_DIS_REASON_IBSS_MERGE = 0x0E,
1105}; 1039};
1106 1040
1107struct wmi_disconnect_event { 1041struct wmi_disconnect_event {
1108 __le16 protocol_reason_status; /* reason code, see 802.11 spec. */ 1042 /* reason code, see 802.11 spec. */
1109 u8 bssid[WMI_MAC_LEN]; /* set if known */ 1043 __le16 protocol_reason_status;
1110 u8 disconnect_reason; /* see wmi_disconnect_reason */ 1044 /* set if known */
1111 u8 assoc_resp_len; /* not used */ 1045 u8 bssid[WMI_MAC_LEN];
1112 u8 assoc_info[0]; /* not used */ 1046 /* see enum wmi_disconnect_reason */
1047 u8 disconnect_reason;
1048 /* last assoc req may passed to host - not in used */
1049 u8 assoc_resp_len;
1050 /* last assoc req may passed to host - not in used */
1051 u8 assoc_info[0];
1113} __packed; 1052} __packed;
1114 1053
1115/* 1054/* WMI_SCAN_COMPLETE_EVENTID */
1116 * WMI_SCAN_COMPLETE_EVENTID
1117 */
1118enum scan_status { 1055enum scan_status {
1119 WMI_SCAN_SUCCESS = 0, 1056 WMI_SCAN_SUCCESS = 0x00,
1120 WMI_SCAN_FAILED = 1, 1057 WMI_SCAN_FAILED = 0x01,
1121 WMI_SCAN_ABORTED = 2, 1058 WMI_SCAN_ABORTED = 0x02,
1122 WMI_SCAN_REJECTED = 3, 1059 WMI_SCAN_REJECTED = 0x03,
1060 WMI_SCAN_ABORT_REJECTED = 0x04,
1123}; 1061};
1124 1062
1125struct wmi_scan_complete_event { 1063struct wmi_scan_complete_event {
1126 __le32 status; /* scan_status */ 1064 /* enum scan_status */
1065 __le32 status;
1127} __packed; 1066} __packed;
1128 1067
1129/* 1068/* WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENT */
1130 * WMI_BA_STATUS_EVENTID 1069enum wmi_acs_info_bitmask {
1131 */ 1070 WMI_ACS_INFO_BITMASK_BEACON_FOUND = 0x01,
1071 WMI_ACS_INFO_BITMASK_BUSY_TIME = 0x02,
1072 WMI_ACS_INFO_BITMASK_TX_TIME = 0x04,
1073 WMI_ACS_INFO_BITMASK_RX_TIME = 0x08,
1074 WMI_ACS_INFO_BITMASK_NOISE = 0x10,
1075};
1076
1077struct scan_acs_info {
1078 u8 channel;
1079 u8 beacon_found;
1080 /* msec */
1081 __le16 busy_time;
1082 __le16 tx_time;
1083 __le16 rx_time;
1084 u8 noise;
1085 u8 reserved[3];
1086} __packed;
1087
1088struct wmi_acs_passive_scan_complete_event {
1089 __le32 dwell_time;
1090 /* valid fields within channel info according to
1091 * their appearance in struct order
1092 */
1093 __le16 filled;
1094 u8 num_scanned_channels;
1095 u8 reserved;
1096 struct scan_acs_info scan_info_list[0];
1097} __packed;
1098
1099/* WMI_BA_STATUS_EVENTID */
1132enum wmi_vring_ba_status { 1100enum wmi_vring_ba_status {
1133 WMI_BA_AGREED = 0, 1101 WMI_BA_AGREED = 0x00,
1134 WMI_BA_NON_AGREED = 1, 1102 WMI_BA_NON_AGREED = 0x01,
1135 /* BA_EN in middle of teardown flow */ 1103 /* BA_EN in middle of teardown flow */
1136 WMI_BA_TD_WIP = 2, 1104 WMI_BA_TD_WIP = 0x02,
1137 /* BA_DIS or BA_EN in middle of BA SETUP flow */ 1105 /* BA_DIS or BA_EN in middle of BA SETUP flow */
1138 WMI_BA_SETUP_WIP = 3, 1106 WMI_BA_SETUP_WIP = 0x03,
1139 /* BA_EN when the BA session is already active */ 1107 /* BA_EN when the BA session is already active */
1140 WMI_BA_SESSION_ACTIVE = 4, 1108 WMI_BA_SESSION_ACTIVE = 0x04,
1141 /* BA_DIS when the BA session is not active */ 1109 /* BA_DIS when the BA session is not active */
1142 WMI_BA_SESSION_NOT_ACTIVE = 5, 1110 WMI_BA_SESSION_NOT_ACTIVE = 0x05,
1143}; 1111};
1144 1112
1145struct wmi_vring_ba_status_event { 1113struct wmi_ba_status_event {
1146 __le16 status; /* enum wmi_vring_ba_status */ 1114 /* enum wmi_vring_ba_status */
1115 __le16 status;
1147 u8 reserved[2]; 1116 u8 reserved[2];
1148 u8 ringid; 1117 u8 ringid;
1149 u8 agg_wsize; 1118 u8 agg_wsize;
@@ -1151,18 +1120,14 @@ struct wmi_vring_ba_status_event {
1151 u8 amsdu; 1120 u8 amsdu;
1152} __packed; 1121} __packed;
1153 1122
1154/* 1123/* WMI_DELBA_EVENTID */
1155 * WMI_DELBA_EVENTID
1156 */
1157struct wmi_delba_event { 1124struct wmi_delba_event {
1158 u8 cidxtid; 1125 u8 cidxtid;
1159 u8 from_initiator; 1126 u8 from_initiator;
1160 __le16 reason; 1127 __le16 reason;
1161} __packed; 1128} __packed;
1162 1129
1163/* 1130/* WMI_VRING_CFG_DONE_EVENTID */
1164 * WMI_VRING_CFG_DONE_EVENTID
1165 */
1166struct wmi_vring_cfg_done_event { 1131struct wmi_vring_cfg_done_event {
1167 u8 ringid; 1132 u8 ringid;
1168 u8 status; 1133 u8 status;
@@ -1170,174 +1135,151 @@ struct wmi_vring_cfg_done_event {
1170 __le32 tx_vring_tail_ptr; 1135 __le32 tx_vring_tail_ptr;
1171} __packed; 1136} __packed;
1172 1137
1173/* 1138/* WMI_RCP_ADDBA_RESP_SENT_EVENTID */
1174 * WMI_RCP_ADDBA_RESP_SENT_EVENTID
1175 */
1176struct wmi_rcp_addba_resp_sent_event { 1139struct wmi_rcp_addba_resp_sent_event {
1177 u8 cidxtid; 1140 u8 cidxtid;
1178 u8 reserved; 1141 u8 reserved;
1179 __le16 status; 1142 __le16 status;
1180} __packed; 1143} __packed;
1181 1144
1182/* 1145/* WMI_RCP_ADDBA_REQ_EVENTID */
1183 * WMI_RCP_ADDBA_REQ_EVENTID
1184 */
1185struct wmi_rcp_addba_req_event { 1146struct wmi_rcp_addba_req_event {
1186 u8 cidxtid; 1147 u8 cidxtid;
1187 u8 dialog_token; 1148 u8 dialog_token;
1188 __le16 ba_param_set; /* ieee80211_ba_parameterset as it received */ 1149 /* ieee80211_ba_parameterset as it received */
1150 __le16 ba_param_set;
1189 __le16 ba_timeout; 1151 __le16 ba_timeout;
1190 __le16 ba_seq_ctrl; /* ieee80211_ba_seqstrl field as it received */ 1152 /* ieee80211_ba_seqstrl field as it received */
1153 __le16 ba_seq_ctrl;
1191} __packed; 1154} __packed;
1192 1155
1193/* 1156/* WMI_CFG_RX_CHAIN_DONE_EVENTID */
1194 * WMI_CFG_RX_CHAIN_DONE_EVENTID
1195 */
1196enum wmi_cfg_rx_chain_done_event_status { 1157enum wmi_cfg_rx_chain_done_event_status {
1197 WMI_CFG_RX_CHAIN_SUCCESS = 1, 1158 WMI_CFG_RX_CHAIN_SUCCESS = 0x01,
1198}; 1159};
1199 1160
1200struct wmi_cfg_rx_chain_done_event { 1161struct wmi_cfg_rx_chain_done_event {
1201 __le32 rx_ring_tail_ptr; /* Rx V-Ring Tail pointer */ 1162 /* V-Ring Tail pointer */
1163 __le32 rx_ring_tail_ptr;
1202 __le32 status; 1164 __le32 status;
1203} __packed; 1165} __packed;
1204 1166
1205/* 1167/* WMI_WBE_LINK_DOWN_EVENTID */
1206 * WMI_WBE_LINK_DOWN_EVENTID
1207 */
1208enum wmi_wbe_link_down_event_reason { 1168enum wmi_wbe_link_down_event_reason {
1209 WMI_WBE_REASON_USER_REQUEST = 0, 1169 WMI_WBE_REASON_USER_REQUEST = 0x00,
1210 WMI_WBE_REASON_RX_DISASSOC = 1, 1170 WMI_WBE_REASON_RX_DISASSOC = 0x01,
1211 WMI_WBE_REASON_BAD_PHY_LINK = 2, 1171 WMI_WBE_REASON_BAD_PHY_LINK = 0x02,
1212}; 1172};
1213 1173
1174/* WMI_WBE_LINK_DOWN_EVENTID */
1214struct wmi_wbe_link_down_event { 1175struct wmi_wbe_link_down_event {
1215 u8 cid; 1176 u8 cid;
1216 u8 reserved[3]; 1177 u8 reserved[3];
1217 __le32 reason; 1178 __le32 reason;
1218} __packed; 1179} __packed;
1219 1180
1220/* 1181/* WMI_DATA_PORT_OPEN_EVENTID */
1221 * WMI_DATA_PORT_OPEN_EVENTID
1222 */
1223struct wmi_data_port_open_event { 1182struct wmi_data_port_open_event {
1224 u8 cid; 1183 u8 cid;
1225 u8 reserved[3]; 1184 u8 reserved[3];
1226} __packed; 1185} __packed;
1227 1186
1228/* 1187/* WMI_VRING_EN_EVENTID */
1229 * WMI_VRING_EN_EVENTID
1230 */
1231struct wmi_vring_en_event { 1188struct wmi_vring_en_event {
1232 u8 vring_index; 1189 u8 vring_index;
1233 u8 reserved[3]; 1190 u8 reserved[3];
1234} __packed; 1191} __packed;
1235 1192
1236/* 1193/* WMI_GET_PCP_CHANNEL_EVENTID */
1237 * WMI_GET_PCP_CHANNEL_EVENTID
1238 */
1239struct wmi_get_pcp_channel_event { 1194struct wmi_get_pcp_channel_event {
1240 u8 channel; 1195 u8 channel;
1241 u8 reserved[3]; 1196 u8 reserved[3];
1242} __packed; 1197} __packed;
1243 1198
1244/* 1199/* WMI_P2P_CFG_DONE_EVENTID */
1245 * WMI_P2P_CFG_DONE_EVENTID
1246 */
1247struct wmi_p2p_cfg_done_event { 1200struct wmi_p2p_cfg_done_event {
1248 u8 status; /* wmi_fw_status */ 1201 /* wmi_fw_status */
1202 u8 status;
1249 u8 reserved[3]; 1203 u8 reserved[3];
1250} __packed; 1204} __packed;
1251 1205
1252/* 1206/* WMI_PORT_ALLOCATED_EVENTID */
1253* WMI_PORT_ALLOCATED_EVENTID
1254*/
1255struct wmi_port_allocated_event { 1207struct wmi_port_allocated_event {
1256 u8 status; /* wmi_fw_status */ 1208 /* wmi_fw_status */
1209 u8 status;
1257 u8 reserved[3]; 1210 u8 reserved[3];
1258} __packed; 1211} __packed;
1259 1212
1260/* 1213/* WMI_PORT_DELETED_EVENTID */
1261* WMI_PORT_DELETED_EVENTID
1262*/
1263struct wmi_port_deleted_event { 1214struct wmi_port_deleted_event {
1264 u8 status; /* wmi_fw_status */ 1215 /* wmi_fw_status */
1216 u8 status;
1265 u8 reserved[3]; 1217 u8 reserved[3];
1266} __packed; 1218} __packed;
1267 1219
1268/* 1220/* WMI_LISTEN_STARTED_EVENTID */
1269 * WMI_LISTEN_STARTED_EVENTID
1270 */
1271struct wmi_listen_started_event { 1221struct wmi_listen_started_event {
1272 u8 status; /* wmi_fw_status */ 1222 /* wmi_fw_status */
1223 u8 status;
1273 u8 reserved[3]; 1224 u8 reserved[3];
1274} __packed; 1225} __packed;
1275 1226
1276/* 1227/* WMI_SEARCH_STARTED_EVENTID */
1277 * WMI_SEARCH_STARTED_EVENTID
1278 */
1279struct wmi_search_started_event { 1228struct wmi_search_started_event {
1280 u8 status; /* wmi_fw_status */ 1229 /* wmi_fw_status */
1230 u8 status;
1281 u8 reserved[3]; 1231 u8 reserved[3];
1282} __packed; 1232} __packed;
1283 1233
1284/* 1234/* WMI_PCP_STARTED_EVENTID */
1285 * WMI_PCP_STARTED_EVENTID
1286 */
1287struct wmi_pcp_started_event { 1235struct wmi_pcp_started_event {
1288 u8 status; /* wmi_fw_status */ 1236 /* wmi_fw_status */
1237 u8 status;
1289 u8 reserved[3]; 1238 u8 reserved[3];
1290} __packed; 1239} __packed;
1291 1240
1292/* 1241/* WMI_PCP_FACTOR_EVENTID */
1293 * WMI_PCP_FACTOR_EVENTID
1294 */
1295struct wmi_pcp_factor_event { 1242struct wmi_pcp_factor_event {
1296 __le32 pcp_factor; 1243 __le32 pcp_factor;
1297} __packed; 1244} __packed;
1298 1245
1299/*
1300 * WMI_SW_TX_COMPLETE_EVENTID
1301 */
1302enum wmi_sw_tx_status { 1246enum wmi_sw_tx_status {
1303 WMI_TX_SW_STATUS_SUCCESS = 0, 1247 WMI_TX_SW_STATUS_SUCCESS = 0x00,
1304 WMI_TX_SW_STATUS_FAILED_NO_RESOURCES = 1, 1248 WMI_TX_SW_STATUS_FAILED_NO_RESOURCES = 0x01,
1305 WMI_TX_SW_STATUS_FAILED_TX = 2, 1249 WMI_TX_SW_STATUS_FAILED_TX = 0x02,
1306}; 1250};
1307 1251
1252/* WMI_SW_TX_COMPLETE_EVENTID */
1308struct wmi_sw_tx_complete_event { 1253struct wmi_sw_tx_complete_event {
1309 u8 status; /* enum wmi_sw_tx_status */ 1254 /* enum wmi_sw_tx_status */
1255 u8 status;
1310 u8 reserved[3]; 1256 u8 reserved[3];
1311} __packed; 1257} __packed;
1312 1258
1313/* 1259/* WMI_CORR_MEASURE_EVENTID */
1314 * WMI_CORR_MEASURE_EVENTID
1315 */
1316struct wmi_corr_measure_event { 1260struct wmi_corr_measure_event {
1317 s32 i; 1261 /* signed */
1318 s32 q; 1262 __le32 i;
1319 s32 image_i; 1263 /* signed */
1320 s32 image_q; 1264 __le32 q;
1265 /* signed */
1266 __le32 image_i;
1267 /* signed */
1268 __le32 image_q;
1321} __packed; 1269} __packed;
1322 1270
1323/* 1271/* WMI_READ_RSSI_EVENTID */
1324 * WMI_READ_RSSI_EVENTID
1325 */
1326struct wmi_read_rssi_event { 1272struct wmi_read_rssi_event {
1327 __le32 ina_rssi_adc_dbm; 1273 __le32 ina_rssi_adc_dbm;
1328} __packed; 1274} __packed;
1329 1275
1330/* 1276/* WMI_GET_SSID_EVENTID */
1331 * WMI_GET_SSID_EVENTID
1332 */
1333struct wmi_get_ssid_event { 1277struct wmi_get_ssid_event {
1334 __le32 ssid_len; 1278 __le32 ssid_len;
1335 u8 ssid[WMI_MAX_SSID_LEN]; 1279 u8 ssid[WMI_MAX_SSID_LEN];
1336} __packed; 1280} __packed;
1337 1281
1338/* 1282/* wmi_rx_mgmt_info */
1339 * WMI_RX_MGMT_PACKET_EVENTID
1340 */
1341struct wmi_rx_mgmt_info { 1283struct wmi_rx_mgmt_info {
1342 u8 mcs; 1284 u8 mcs;
1343 s8 snr; 1285 s8 snr;
@@ -1346,39 +1288,65 @@ struct wmi_rx_mgmt_info {
1346 __le16 stype; 1288 __le16 stype;
1347 __le16 status; 1289 __le16 status;
1348 __le32 len; 1290 __le32 len;
1291 /* Not resolved when == 0xFFFFFFFF ==> Broadcast to all MIDS */
1349 u8 qid; 1292 u8 qid;
1293 /* Not resolved when == 0xFFFFFFFF ==> Broadcast to all MIDS */
1350 u8 mid; 1294 u8 mid;
1351 u8 cid; 1295 u8 cid;
1352 u8 channel; /* From Radio MNGR */ 1296 /* From Radio MNGR */
1297 u8 channel;
1353} __packed; 1298} __packed;
1354 1299
1355/* 1300/* wmi_otp_read_write_cmd */
1356 * WMI_TX_MGMT_PACKET_EVENTID 1301struct wmi_otp_read_write_cmd {
1357 */ 1302 __le32 addr;
1303 __le32 size;
1304 u8 values[0];
1305} __packed;
1306
1307/* WMI_OTP_READ_RESULT_EVENTID */
1308struct wmi_otp_read_result_event {
1309 u8 payload[0];
1310} __packed;
1311
1312/* WMI_TX_MGMT_PACKET_EVENTID */
1358struct wmi_tx_mgmt_packet_event { 1313struct wmi_tx_mgmt_packet_event {
1359 u8 payload[0]; 1314 u8 payload[0];
1360} __packed; 1315} __packed;
1361 1316
1317/* WMI_RX_MGMT_PACKET_EVENTID */
1362struct wmi_rx_mgmt_packet_event { 1318struct wmi_rx_mgmt_packet_event {
1363 struct wmi_rx_mgmt_info info; 1319 struct wmi_rx_mgmt_info info;
1364 u8 payload[0]; 1320 u8 payload[0];
1365} __packed; 1321} __packed;
1366 1322
1367/* 1323/* WMI_ECHO_RSP_EVENTID */
1368 * WMI_ECHO_RSP_EVENTID 1324struct wmi_echo_rsp_event {
1369 */
1370struct wmi_echo_event {
1371 __le32 echoed_value; 1325 __le32 echoed_value;
1372} __packed; 1326} __packed;
1373 1327
1374/* 1328/* WMI_TEMP_SENSE_DONE_EVENTID
1375 * WMI_TEMP_SENSE_DONE_EVENTID
1376 * 1329 *
1377 * Measure MAC and radio temperatures 1330 * Measure MAC and radio temperatures
1378 */ 1331 */
1379struct wmi_temp_sense_done_event { 1332struct wmi_temp_sense_done_event {
1333 /* Temperature times 1000 (actual temperature will be achieved by
1334 * dividing the value by 1000)
1335 */
1380 __le32 baseband_t1000; 1336 __le32 baseband_t1000;
1337 /* Temperature times 1000 (actual temperature will be achieved by
1338 * dividing the value by 1000)
1339 */
1381 __le32 rf_t1000; 1340 __le32 rf_t1000;
1382} __packed; 1341} __packed;
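Since both readings are reported multiplied by 1000, converting them for logging is a simple divide/modulo; the units are presumably degrees Celsius, though the header does not say. Illustrative only, kernel context assumed:

/* Illustrative conversion of the x1000 temperature readings. */
static void example_report_temps(const struct wmi_temp_sense_done_event *evt)
{
        u32 bb = le32_to_cpu(evt->baseband_t1000);
        u32 rf = le32_to_cpu(evt->rf_t1000);

        pr_info("baseband %u.%03u, RF %u.%03u\n",
                bb / 1000, bb % 1000, rf / 1000, rf % 1000);
}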
1383 1342
1343#define WMI_SCAN_DWELL_TIME_MS (100)
1344#define WMI_SURVEY_TIMEOUT_MS (10000)
1345
1346enum wmi_hidden_ssid {
1347 WMI_HIDDEN_SSID_DISABLED = 0x00,
1348 WMI_HIDDEN_SSID_SEND_EMPTY = 0x10,
1349 WMI_HIDDEN_SSID_CLEAR = 0xFE,
1350};
1351
1384#endif /* __WILOCITY_WMI_H__ */ 1352#endif /* __WILOCITY_WMI_H__ */
diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c
index 1efb1d66e0b7..7c108047fb46 100644
--- a/drivers/net/wireless/atmel/at76c50x-usb.c
+++ b/drivers/net/wireless/atmel/at76c50x-usb.c
@@ -1547,7 +1547,7 @@ static inline int at76_guess_freq(struct at76_priv *priv)
1547 channel = el[2]; 1547 channel = el[2];
1548 1548
1549exit: 1549exit:
1550 return ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ); 1550 return ieee80211_channel_to_frequency(channel, NL80211_BAND_2GHZ);
1551} 1551}
1552 1552
1553static void at76_rx_tasklet(unsigned long param) 1553static void at76_rx_tasklet(unsigned long param)
@@ -1590,7 +1590,7 @@ static void at76_rx_tasklet(unsigned long param)
1590 rx_status.signal = buf->rssi; 1590 rx_status.signal = buf->rssi;
1591 rx_status.flag |= RX_FLAG_DECRYPTED; 1591 rx_status.flag |= RX_FLAG_DECRYPTED;
1592 rx_status.flag |= RX_FLAG_IV_STRIPPED; 1592 rx_status.flag |= RX_FLAG_IV_STRIPPED;
1593 rx_status.band = IEEE80211_BAND_2GHZ; 1593 rx_status.band = NL80211_BAND_2GHZ;
1594 rx_status.freq = at76_guess_freq(priv); 1594 rx_status.freq = at76_guess_freq(priv);
1595 1595
1596 at76_dbg(DBG_MAC80211, "calling ieee80211_rx_irqsafe(): %d/%d", 1596 at76_dbg(DBG_MAC80211, "calling ieee80211_rx_irqsafe(): %d/%d",
@@ -2359,7 +2359,7 @@ static int at76_init_new_device(struct at76_priv *priv,
2359 priv->hw->wiphy->max_scan_ssids = 1; 2359 priv->hw->wiphy->max_scan_ssids = 1;
2360 priv->hw->wiphy->max_scan_ie_len = 0; 2360 priv->hw->wiphy->max_scan_ie_len = 0;
2361 priv->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); 2361 priv->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
2362 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &at76_supported_band; 2362 priv->hw->wiphy->bands[NL80211_BAND_2GHZ] = &at76_supported_band;
2363 ieee80211_hw_set(priv->hw, RX_INCLUDES_FCS); 2363 ieee80211_hw_set(priv->hw, RX_INCLUDES_FCS);
2364 ieee80211_hw_set(priv->hw, SIGNAL_UNSPEC); 2364 ieee80211_hw_set(priv->hw, SIGNAL_UNSPEC);
2365 priv->hw->max_signal = 100; 2365 priv->hw->max_signal = 100;
diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c
index 6a1f03c271c1..8f8f37f3a00c 100644
--- a/drivers/net/wireless/atmel/atmel.c
+++ b/drivers/net/wireless/atmel/atmel.c
@@ -2434,7 +2434,7 @@ static int atmel_get_range(struct net_device *dev,
2434 2434
2435 /* Values in MHz -> * 10^5 * 10 */ 2435 /* Values in MHz -> * 10^5 * 10 */
2436 range->freq[k].m = 100000 * 2436 range->freq[k].m = 100000 *
2437 ieee80211_channel_to_frequency(i, IEEE80211_BAND_2GHZ); 2437 ieee80211_channel_to_frequency(i, NL80211_BAND_2GHZ);
2438 range->freq[k++].e = 1; 2438 range->freq[k++].e = 1;
2439 } 2439 }
2440 range->num_frequency = k; 2440 range->num_frequency = k;
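
Note: both hunks above feed a 2.4 GHz channel number to ieee80211_channel_to_frequency(..., NL80211_BAND_2GHZ); the atmel one then scales the MHz result into the wireless-extensions mantissa/exponent pair (m = freq * 100000, e = 1, i.e. the frequency in Hz). As a reference for what that conversion computes, here is a hypothetical helper mirroring the well-known 2.4 GHz channel plan — it is not the cfg80211 implementation, only the arithmetic: channels 1-13 sit at 2407 + 5 * channel MHz, with channel 14 as the 2484 MHz exception.

#include <stdio.h>

/* Hypothetical helper: 2.4 GHz channel number to centre frequency in MHz.
 * Returns 0 for channels outside the 1-14 range. */
static int chan_to_freq_2ghz(int channel)
{
	if (channel == 14)
		return 2484;			/* Japan-only exception */
	if (channel >= 1 && channel <= 13)
		return 2407 + channel * 5;	/* 5 MHz spacing from 2412 */
	return 0;
}

int main(void)
{
	for (int ch = 1; ch <= 14; ch++)
		printf("channel %2d -> %d MHz\n", ch, chan_to_freq_2ghz(ch));
	return 0;
}
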
diff --git a/drivers/net/wireless/broadcom/b43/b43.h b/drivers/net/wireless/broadcom/b43/b43.h
index 036552439816..d7d42f0b80c3 100644
--- a/drivers/net/wireless/broadcom/b43/b43.h
+++ b/drivers/net/wireless/broadcom/b43/b43.h
@@ -992,9 +992,9 @@ static inline int b43_is_mode(struct b43_wl *wl, int type)
992 992
993/** 993/**
994 * b43_current_band - Returns the currently used band. 994 * b43_current_band - Returns the currently used band.
995 * Returns one of IEEE80211_BAND_2GHZ and IEEE80211_BAND_5GHZ. 995 * Returns one of NL80211_BAND_2GHZ and NL80211_BAND_5GHZ.
996 */ 996 */
997static inline enum ieee80211_band b43_current_band(struct b43_wl *wl) 997static inline enum nl80211_band b43_current_band(struct b43_wl *wl)
998{ 998{
999 return wl->hw->conf.chandef.chan->band; 999 return wl->hw->conf.chandef.chan->band;
1000} 1000}
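
Note: b43_current_band() now hands back enum nl80211_band, and the per-PHY code later in this diff simply branches on it — for example b43_phy_ac_op_get_default_chan() below returns channel 11 on 2.4 GHz and 36 on 5 GHz. A compilable stand-alone sketch of that shape, using local stand-in enumerators rather than the real nl80211.h definitions:

/* Stand-in enumerators; the real ones come from include/uapi/linux/nl80211.h. */
enum band_example {
	EX_BAND_2GHZ,
	EX_BAND_5GHZ,
};

/* Mirrors the *_op_get_default_chan() helpers touched by this diff:
 * one branch per band, with no reference to the old mac80211 enum. */
static unsigned int default_chan_for_band(enum band_example band)
{
	if (band == EX_BAND_2GHZ)
		return 11;	/* 2.4 GHz default used by several b43 PHYs */
	return 36;		/* first 5 GHz channel these PHYs default to */
}

int main(void)
{
	return default_chan_for_band(EX_BAND_5GHZ) == 36 ? 0 : 1;
}
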
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index 72380af9dc52..4ee5c5853f9f 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -187,7 +187,7 @@ static struct ieee80211_rate __b43_ratetable[] = {
187#define b43_g_ratetable_size 12 187#define b43_g_ratetable_size 12
188 188
189#define CHAN2G(_channel, _freq, _flags) { \ 189#define CHAN2G(_channel, _freq, _flags) { \
190 .band = IEEE80211_BAND_2GHZ, \ 190 .band = NL80211_BAND_2GHZ, \
191 .center_freq = (_freq), \ 191 .center_freq = (_freq), \
192 .hw_value = (_channel), \ 192 .hw_value = (_channel), \
193 .flags = (_flags), \ 193 .flags = (_flags), \
@@ -216,7 +216,7 @@ static struct ieee80211_channel b43_2ghz_chantable[] = {
216#undef CHAN2G 216#undef CHAN2G
217 217
218#define CHAN4G(_channel, _flags) { \ 218#define CHAN4G(_channel, _flags) { \
219 .band = IEEE80211_BAND_5GHZ, \ 219 .band = NL80211_BAND_5GHZ, \
220 .center_freq = 4000 + (5 * (_channel)), \ 220 .center_freq = 4000 + (5 * (_channel)), \
221 .hw_value = (_channel), \ 221 .hw_value = (_channel), \
222 .flags = (_flags), \ 222 .flags = (_flags), \
@@ -224,7 +224,7 @@ static struct ieee80211_channel b43_2ghz_chantable[] = {
224 .max_power = 30, \ 224 .max_power = 30, \
225} 225}
226#define CHAN5G(_channel, _flags) { \ 226#define CHAN5G(_channel, _flags) { \
227 .band = IEEE80211_BAND_5GHZ, \ 227 .band = NL80211_BAND_5GHZ, \
228 .center_freq = 5000 + (5 * (_channel)), \ 228 .center_freq = 5000 + (5 * (_channel)), \
229 .hw_value = (_channel), \ 229 .hw_value = (_channel), \
230 .flags = (_flags), \ 230 .flags = (_flags), \
@@ -323,7 +323,7 @@ static struct ieee80211_channel b43_5ghz_aphy_chantable[] = {
323#undef CHAN5G 323#undef CHAN5G
324 324
325static struct ieee80211_supported_band b43_band_5GHz_nphy = { 325static struct ieee80211_supported_band b43_band_5GHz_nphy = {
326 .band = IEEE80211_BAND_5GHZ, 326 .band = NL80211_BAND_5GHZ,
327 .channels = b43_5ghz_nphy_chantable, 327 .channels = b43_5ghz_nphy_chantable,
328 .n_channels = ARRAY_SIZE(b43_5ghz_nphy_chantable), 328 .n_channels = ARRAY_SIZE(b43_5ghz_nphy_chantable),
329 .bitrates = b43_a_ratetable, 329 .bitrates = b43_a_ratetable,
@@ -331,7 +331,7 @@ static struct ieee80211_supported_band b43_band_5GHz_nphy = {
331}; 331};
332 332
333static struct ieee80211_supported_band b43_band_5GHz_nphy_limited = { 333static struct ieee80211_supported_band b43_band_5GHz_nphy_limited = {
334 .band = IEEE80211_BAND_5GHZ, 334 .band = NL80211_BAND_5GHZ,
335 .channels = b43_5ghz_nphy_chantable_limited, 335 .channels = b43_5ghz_nphy_chantable_limited,
336 .n_channels = ARRAY_SIZE(b43_5ghz_nphy_chantable_limited), 336 .n_channels = ARRAY_SIZE(b43_5ghz_nphy_chantable_limited),
337 .bitrates = b43_a_ratetable, 337 .bitrates = b43_a_ratetable,
@@ -339,7 +339,7 @@ static struct ieee80211_supported_band b43_band_5GHz_nphy_limited = {
339}; 339};
340 340
341static struct ieee80211_supported_band b43_band_5GHz_aphy = { 341static struct ieee80211_supported_band b43_band_5GHz_aphy = {
342 .band = IEEE80211_BAND_5GHZ, 342 .band = NL80211_BAND_5GHZ,
343 .channels = b43_5ghz_aphy_chantable, 343 .channels = b43_5ghz_aphy_chantable,
344 .n_channels = ARRAY_SIZE(b43_5ghz_aphy_chantable), 344 .n_channels = ARRAY_SIZE(b43_5ghz_aphy_chantable),
345 .bitrates = b43_a_ratetable, 345 .bitrates = b43_a_ratetable,
@@ -347,7 +347,7 @@ static struct ieee80211_supported_band b43_band_5GHz_aphy = {
347}; 347};
348 348
349static struct ieee80211_supported_band b43_band_2GHz = { 349static struct ieee80211_supported_band b43_band_2GHz = {
350 .band = IEEE80211_BAND_2GHZ, 350 .band = NL80211_BAND_2GHZ,
351 .channels = b43_2ghz_chantable, 351 .channels = b43_2ghz_chantable,
352 .n_channels = ARRAY_SIZE(b43_2ghz_chantable), 352 .n_channels = ARRAY_SIZE(b43_2ghz_chantable),
353 .bitrates = b43_g_ratetable, 353 .bitrates = b43_g_ratetable,
@@ -355,7 +355,7 @@ static struct ieee80211_supported_band b43_band_2GHz = {
355}; 355};
356 356
357static struct ieee80211_supported_band b43_band_2ghz_limited = { 357static struct ieee80211_supported_band b43_band_2ghz_limited = {
358 .band = IEEE80211_BAND_2GHZ, 358 .band = NL80211_BAND_2GHZ,
359 .channels = b43_2ghz_chantable, 359 .channels = b43_2ghz_chantable,
360 .n_channels = b43_2ghz_chantable_limited_size, 360 .n_channels = b43_2ghz_chantable_limited_size,
361 .bitrates = b43_g_ratetable, 361 .bitrates = b43_g_ratetable,
@@ -717,7 +717,7 @@ static void b43_set_slot_time(struct b43_wldev *dev, u16 slot_time)
717{ 717{
718 /* slot_time is in usec. */ 718 /* slot_time is in usec. */
719 /* This test used to exit for all but a G PHY. */ 719 /* This test used to exit for all but a G PHY. */
720 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) 720 if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
721 return; 721 return;
722 b43_write16(dev, B43_MMIO_IFSSLOT, 510 + slot_time); 722 b43_write16(dev, B43_MMIO_IFSSLOT, 510 + slot_time);
723 /* Shared memory location 0x0010 is the slot time and should be 723 /* Shared memory location 0x0010 is the slot time and should be
@@ -3880,12 +3880,12 @@ static void b43_op_set_tsf(struct ieee80211_hw *hw,
3880 mutex_unlock(&wl->mutex); 3880 mutex_unlock(&wl->mutex);
3881} 3881}
3882 3882
3883static const char *band_to_string(enum ieee80211_band band) 3883static const char *band_to_string(enum nl80211_band band)
3884{ 3884{
3885 switch (band) { 3885 switch (band) {
3886 case IEEE80211_BAND_5GHZ: 3886 case NL80211_BAND_5GHZ:
3887 return "5"; 3887 return "5";
3888 case IEEE80211_BAND_2GHZ: 3888 case NL80211_BAND_2GHZ:
3889 return "2.4"; 3889 return "2.4";
3890 default: 3890 default:
3891 break; 3891 break;
@@ -3903,10 +3903,10 @@ static int b43_switch_band(struct b43_wldev *dev,
3903 u32 tmp; 3903 u32 tmp;
3904 3904
3905 switch (chan->band) { 3905 switch (chan->band) {
3906 case IEEE80211_BAND_5GHZ: 3906 case NL80211_BAND_5GHZ:
3907 gmode = false; 3907 gmode = false;
3908 break; 3908 break;
3909 case IEEE80211_BAND_2GHZ: 3909 case NL80211_BAND_2GHZ:
3910 gmode = true; 3910 gmode = true;
3911 break; 3911 break;
3912 default: 3912 default:
@@ -5294,16 +5294,16 @@ static int b43_setup_bands(struct b43_wldev *dev,
5294 phy->radio_rev == 9; 5294 phy->radio_rev == 9;
5295 5295
5296 if (have_2ghz_phy) 5296 if (have_2ghz_phy)
5297 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = limited_2g ? 5297 hw->wiphy->bands[NL80211_BAND_2GHZ] = limited_2g ?
5298 &b43_band_2ghz_limited : &b43_band_2GHz; 5298 &b43_band_2ghz_limited : &b43_band_2GHz;
5299 if (dev->phy.type == B43_PHYTYPE_N) { 5299 if (dev->phy.type == B43_PHYTYPE_N) {
5300 if (have_5ghz_phy) 5300 if (have_5ghz_phy)
5301 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = limited_5g ? 5301 hw->wiphy->bands[NL80211_BAND_5GHZ] = limited_5g ?
5302 &b43_band_5GHz_nphy_limited : 5302 &b43_band_5GHz_nphy_limited :
5303 &b43_band_5GHz_nphy; 5303 &b43_band_5GHz_nphy;
5304 } else { 5304 } else {
5305 if (have_5ghz_phy) 5305 if (have_5ghz_phy)
5306 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &b43_band_5GHz_aphy; 5306 hw->wiphy->bands[NL80211_BAND_5GHZ] = &b43_band_5GHz_aphy;
5307 } 5307 }
5308 5308
5309 dev->phy.supports_2ghz = have_2ghz_phy; 5309 dev->phy.supports_2ghz = have_2ghz_phy;
@@ -5680,11 +5680,12 @@ static int b43_bcma_probe(struct bcma_device *core)
5680 INIT_WORK(&wl->firmware_load, b43_request_firmware); 5680 INIT_WORK(&wl->firmware_load, b43_request_firmware);
5681 schedule_work(&wl->firmware_load); 5681 schedule_work(&wl->firmware_load);
5682 5682
5683bcma_out:
5684 return err; 5683 return err;
5685 5684
5686bcma_err_wireless_exit: 5685bcma_err_wireless_exit:
5687 ieee80211_free_hw(wl->hw); 5686 ieee80211_free_hw(wl->hw);
5687bcma_out:
5688 kfree(dev);
5688 return err; 5689 return err;
5689} 5690}
5690 5691
@@ -5712,8 +5713,8 @@ static void b43_bcma_remove(struct bcma_device *core)
5712 b43_rng_exit(wl); 5713 b43_rng_exit(wl);
5713 5714
5714 b43_leds_unregister(wl); 5715 b43_leds_unregister(wl);
5715
5716 ieee80211_free_hw(wl->hw); 5716 ieee80211_free_hw(wl->hw);
5717 kfree(wldev->dev);
5717} 5718}
5718 5719
5719static struct bcma_driver b43_bcma_driver = { 5720static struct bcma_driver b43_bcma_driver = {
@@ -5796,6 +5797,7 @@ static void b43_ssb_remove(struct ssb_device *sdev)
5796 5797
5797 b43_leds_unregister(wl); 5798 b43_leds_unregister(wl);
5798 b43_wireless_exit(dev, wl); 5799 b43_wireless_exit(dev, wl);
5800 kfree(dev);
5799} 5801}
5800 5802
5801static struct ssb_driver b43_ssb_driver = { 5803static struct ssb_driver b43_ssb_driver = {
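
Note: besides the band rename, the b43_bcma_probe(), b43_bcma_remove() and b43_ssb_remove() hunks above plug a leak of the bus-device wrapper: the probe error path now ends with kfree(dev), and the labels are ordered so the success path returns early while each failure point unwinds only what was set up before it. A self-contained sketch of that goto-unwind shape, with illustrative names rather than the b43 symbols:

#include <stdlib.h>

struct hw { int dummy; };

/* Illustrative allocation/registration stubs. */
static struct hw *alloc_hw(void) { return malloc(sizeof(struct hw)); }
static void free_hw(struct hw *hw) { free(hw); }
static int start_hw(struct hw *hw) { (void)hw; return -5; /* pretend -EIO */ }

/* Same unwind shape as the reworked probe: return early on success,
 * otherwise fall through the labels in reverse order of setup, ending
 * with the free of the wrapper that was allocated first. */
static int probe_example(void)
{
	int err;
	int *dev;		/* stands in for the bus-device wrapper */
	struct hw *hw;

	dev = malloc(sizeof(*dev));
	if (!dev)
		return -12;	/* -ENOMEM */

	hw = alloc_hw();
	if (!hw) {
		err = -12;
		goto out_free_dev;
	}

	err = start_hw(hw);
	if (err)
		goto out_free_hw;

	return 0;		/* success: dev and hw stay allocated */

out_free_hw:
	free_hw(hw);
out_free_dev:
	free(dev);		/* the step the hunk above adds */
	return err;
}

int main(void)
{
	return probe_example() == -5 ? 0 : 1;
}
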
diff --git a/drivers/net/wireless/broadcom/b43/phy_ac.c b/drivers/net/wireless/broadcom/b43/phy_ac.c
index e75633d67938..52f8abad8831 100644
--- a/drivers/net/wireless/broadcom/b43/phy_ac.c
+++ b/drivers/net/wireless/broadcom/b43/phy_ac.c
@@ -61,7 +61,7 @@ static void b43_phy_ac_op_radio_write(struct b43_wldev *dev, u16 reg,
61 61
62static unsigned int b43_phy_ac_op_get_default_chan(struct b43_wldev *dev) 62static unsigned int b43_phy_ac_op_get_default_chan(struct b43_wldev *dev)
63{ 63{
64 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) 64 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
65 return 11; 65 return 11;
66 return 36; 66 return 36;
67} 67}
diff --git a/drivers/net/wireless/broadcom/b43/phy_common.c b/drivers/net/wireless/broadcom/b43/phy_common.c
index ec2b9c577b90..85f2ca989565 100644
--- a/drivers/net/wireless/broadcom/b43/phy_common.c
+++ b/drivers/net/wireless/broadcom/b43/phy_common.c
@@ -436,7 +436,7 @@ int b43_switch_channel(struct b43_wldev *dev, unsigned int new_channel)
436 * firmware from sending ghost packets. 436 * firmware from sending ghost packets.
437 */ 437 */
438 channelcookie = new_channel; 438 channelcookie = new_channel;
439 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) 439 if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
440 channelcookie |= B43_SHM_SH_CHAN_5GHZ; 440 channelcookie |= B43_SHM_SH_CHAN_5GHZ;
441 /* FIXME: set 40Mhz flag if required */ 441 /* FIXME: set 40Mhz flag if required */
442 if (0) 442 if (0)
diff --git a/drivers/net/wireless/broadcom/b43/phy_ht.c b/drivers/net/wireless/broadcom/b43/phy_ht.c
index bd68945965d6..718c90e81696 100644
--- a/drivers/net/wireless/broadcom/b43/phy_ht.c
+++ b/drivers/net/wireless/broadcom/b43/phy_ht.c
@@ -568,7 +568,7 @@ static void b43_phy_ht_tx_power_ctl(struct b43_wldev *dev, bool enable)
568 } else { 568 } else {
569 b43_phy_set(dev, B43_PHY_HT_TXPCTL_CMD_C1, en_bits); 569 b43_phy_set(dev, B43_PHY_HT_TXPCTL_CMD_C1, en_bits);
570 570
571 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { 571 if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
572 for (i = 0; i < 3; i++) 572 for (i = 0; i < 3; i++)
573 b43_phy_write(dev, cmd_regs[i], 0x32); 573 b43_phy_write(dev, cmd_regs[i], 0x32);
574 } 574 }
@@ -643,7 +643,7 @@ static void b43_phy_ht_tx_power_ctl_setup(struct b43_wldev *dev)
643 u16 freq = dev->phy.chandef->chan->center_freq; 643 u16 freq = dev->phy.chandef->chan->center_freq;
644 int i, c; 644 int i, c;
645 645
646 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 646 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
647 for (c = 0; c < 3; c++) { 647 for (c = 0; c < 3; c++) {
648 target[c] = sprom->core_pwr_info[c].maxpwr_2g; 648 target[c] = sprom->core_pwr_info[c].maxpwr_2g;
649 a1[c] = sprom->core_pwr_info[c].pa_2g[0]; 649 a1[c] = sprom->core_pwr_info[c].pa_2g[0];
@@ -777,7 +777,7 @@ static void b43_phy_ht_channel_setup(struct b43_wldev *dev,
777 const struct b43_phy_ht_channeltab_e_phy *e, 777 const struct b43_phy_ht_channeltab_e_phy *e,
778 struct ieee80211_channel *new_channel) 778 struct ieee80211_channel *new_channel)
779{ 779{
780 if (new_channel->band == IEEE80211_BAND_5GHZ) { 780 if (new_channel->band == NL80211_BAND_5GHZ) {
781 /* Switch to 2 GHz for a moment to access B-PHY regs */ 781 /* Switch to 2 GHz for a moment to access B-PHY regs */
782 b43_phy_mask(dev, B43_PHY_HT_BANDCTL, ~B43_PHY_HT_BANDCTL_5GHZ); 782 b43_phy_mask(dev, B43_PHY_HT_BANDCTL, ~B43_PHY_HT_BANDCTL_5GHZ);
783 783
@@ -805,7 +805,7 @@ static void b43_phy_ht_channel_setup(struct b43_wldev *dev,
805 } else { 805 } else {
806 b43_phy_ht_classifier(dev, B43_PHY_HT_CLASS_CTL_OFDM_EN, 806 b43_phy_ht_classifier(dev, B43_PHY_HT_CLASS_CTL_OFDM_EN,
807 B43_PHY_HT_CLASS_CTL_OFDM_EN); 807 B43_PHY_HT_CLASS_CTL_OFDM_EN);
808 if (new_channel->band == IEEE80211_BAND_2GHZ) 808 if (new_channel->band == NL80211_BAND_2GHZ)
809 b43_phy_mask(dev, B43_PHY_HT_TEST, ~0x840); 809 b43_phy_mask(dev, B43_PHY_HT_TEST, ~0x840);
810 } 810 }
811 811
@@ -916,7 +916,7 @@ static int b43_phy_ht_op_init(struct b43_wldev *dev)
916 if (0) /* TODO: condition */ 916 if (0) /* TODO: condition */
917 ; /* TODO: PHY op on reg 0x217 */ 917 ; /* TODO: PHY op on reg 0x217 */
918 918
919 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) 919 if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
920 b43_phy_ht_classifier(dev, B43_PHY_HT_CLASS_CTL_CCK_EN, 0); 920 b43_phy_ht_classifier(dev, B43_PHY_HT_CLASS_CTL_CCK_EN, 0);
921 else 921 else
922 b43_phy_ht_classifier(dev, B43_PHY_HT_CLASS_CTL_CCK_EN, 922 b43_phy_ht_classifier(dev, B43_PHY_HT_CLASS_CTL_CCK_EN,
@@ -1005,7 +1005,7 @@ static int b43_phy_ht_op_init(struct b43_wldev *dev)
1005 b43_phy_ht_classifier(dev, 0, 0); 1005 b43_phy_ht_classifier(dev, 0, 0);
1006 b43_phy_ht_read_clip_detection(dev, clip_state); 1006 b43_phy_ht_read_clip_detection(dev, clip_state);
1007 1007
1008 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) 1008 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
1009 b43_phy_ht_bphy_init(dev); 1009 b43_phy_ht_bphy_init(dev);
1010 1010
1011 b43_httab_write_bulk(dev, B43_HTTAB32(0x1a, 0xc0), 1011 b43_httab_write_bulk(dev, B43_HTTAB32(0x1a, 0xc0),
@@ -1077,7 +1077,7 @@ static int b43_phy_ht_op_switch_channel(struct b43_wldev *dev,
1077 enum nl80211_channel_type channel_type = 1077 enum nl80211_channel_type channel_type =
1078 cfg80211_get_chandef_type(&dev->wl->hw->conf.chandef); 1078 cfg80211_get_chandef_type(&dev->wl->hw->conf.chandef);
1079 1079
1080 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 1080 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
1081 if ((new_channel < 1) || (new_channel > 14)) 1081 if ((new_channel < 1) || (new_channel > 14))
1082 return -EINVAL; 1082 return -EINVAL;
1083 } else { 1083 } else {
@@ -1089,7 +1089,7 @@ static int b43_phy_ht_op_switch_channel(struct b43_wldev *dev,
1089 1089
1090static unsigned int b43_phy_ht_op_get_default_chan(struct b43_wldev *dev) 1090static unsigned int b43_phy_ht_op_get_default_chan(struct b43_wldev *dev)
1091{ 1091{
1092 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) 1092 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
1093 return 11; 1093 return 11;
1094 return 36; 1094 return 36;
1095} 1095}
diff --git a/drivers/net/wireless/broadcom/b43/phy_lcn.c b/drivers/net/wireless/broadcom/b43/phy_lcn.c
index 97461ccf3e1e..63bd29f070f7 100644
--- a/drivers/net/wireless/broadcom/b43/phy_lcn.c
+++ b/drivers/net/wireless/broadcom/b43/phy_lcn.c
@@ -108,7 +108,7 @@ static void b43_radio_2064_channel_setup(struct b43_wldev *dev)
108/* wlc_radio_2064_init */ 108/* wlc_radio_2064_init */
109static void b43_radio_2064_init(struct b43_wldev *dev) 109static void b43_radio_2064_init(struct b43_wldev *dev)
110{ 110{
111 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 111 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
112 b43_radio_write(dev, 0x09c, 0x0020); 112 b43_radio_write(dev, 0x09c, 0x0020);
113 b43_radio_write(dev, 0x105, 0x0008); 113 b43_radio_write(dev, 0x105, 0x0008);
114 } else { 114 } else {
@@ -535,7 +535,7 @@ static void b43_phy_lcn_tx_pwr_ctl_init(struct b43_wldev *dev)
535 b43_mac_suspend(dev); 535 b43_mac_suspend(dev);
536 536
537 if (!dev->phy.lcn->hw_pwr_ctl_capable) { 537 if (!dev->phy.lcn->hw_pwr_ctl_capable) {
538 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 538 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
539 tx_gains.gm_gain = 4; 539 tx_gains.gm_gain = 4;
540 tx_gains.pga_gain = 12; 540 tx_gains.pga_gain = 12;
541 tx_gains.pad_gain = 12; 541 tx_gains.pad_gain = 12;
@@ -720,7 +720,7 @@ static int b43_phy_lcn_op_init(struct b43_wldev *dev)
720 else 720 else
721 B43_WARN_ON(1); 721 B43_WARN_ON(1);
722 722
723 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) 723 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
724 b43_phy_lcn_tx_pwr_ctl_init(dev); 724 b43_phy_lcn_tx_pwr_ctl_init(dev);
725 725
726 b43_switch_channel(dev, dev->phy.channel); 726 b43_switch_channel(dev, dev->phy.channel);
@@ -779,7 +779,7 @@ static int b43_phy_lcn_op_switch_channel(struct b43_wldev *dev,
779 enum nl80211_channel_type channel_type = 779 enum nl80211_channel_type channel_type =
780 cfg80211_get_chandef_type(&dev->wl->hw->conf.chandef); 780 cfg80211_get_chandef_type(&dev->wl->hw->conf.chandef);
781 781
782 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 782 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
783 if ((new_channel < 1) || (new_channel > 14)) 783 if ((new_channel < 1) || (new_channel > 14))
784 return -EINVAL; 784 return -EINVAL;
785 } else { 785 } else {
@@ -791,7 +791,7 @@ static int b43_phy_lcn_op_switch_channel(struct b43_wldev *dev,
791 791
792static unsigned int b43_phy_lcn_op_get_default_chan(struct b43_wldev *dev) 792static unsigned int b43_phy_lcn_op_get_default_chan(struct b43_wldev *dev)
793{ 793{
794 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) 794 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
795 return 1; 795 return 1;
796 return 36; 796 return 36;
797} 797}
diff --git a/drivers/net/wireless/broadcom/b43/phy_lp.c b/drivers/net/wireless/broadcom/b43/phy_lp.c
index 058a9f232050..6922cbb99a04 100644
--- a/drivers/net/wireless/broadcom/b43/phy_lp.c
+++ b/drivers/net/wireless/broadcom/b43/phy_lp.c
@@ -46,7 +46,7 @@ static inline u16 channel2freq_lp(u8 channel)
46 46
47static unsigned int b43_lpphy_op_get_default_chan(struct b43_wldev *dev) 47static unsigned int b43_lpphy_op_get_default_chan(struct b43_wldev *dev)
48{ 48{
49 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) 49 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
50 return 1; 50 return 1;
51 return 36; 51 return 36;
52} 52}
@@ -91,7 +91,7 @@ static void lpphy_read_band_sprom(struct b43_wldev *dev)
91 u32 ofdmpo; 91 u32 ofdmpo;
92 int i; 92 int i;
93 93
94 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 94 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
95 lpphy->tx_isolation_med_band = sprom->tri2g; 95 lpphy->tx_isolation_med_band = sprom->tri2g;
96 lpphy->bx_arch = sprom->bxa2g; 96 lpphy->bx_arch = sprom->bxa2g;
97 lpphy->rx_pwr_offset = sprom->rxpo2g; 97 lpphy->rx_pwr_offset = sprom->rxpo2g;
@@ -174,7 +174,7 @@ static void lpphy_adjust_gain_table(struct b43_wldev *dev, u32 freq)
174 174
175 B43_WARN_ON(dev->phy.rev >= 2); 175 B43_WARN_ON(dev->phy.rev >= 2);
176 176
177 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) 177 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
178 isolation = lpphy->tx_isolation_med_band; 178 isolation = lpphy->tx_isolation_med_band;
179 else if (freq <= 5320) 179 else if (freq <= 5320)
180 isolation = lpphy->tx_isolation_low_band; 180 isolation = lpphy->tx_isolation_low_band;
@@ -238,7 +238,7 @@ static void lpphy_baseband_rev0_1_init(struct b43_wldev *dev)
238 b43_phy_maskset(dev, B43_LPPHY_INPUT_PWRDB, 238 b43_phy_maskset(dev, B43_LPPHY_INPUT_PWRDB,
239 0xFF00, lpphy->rx_pwr_offset); 239 0xFF00, lpphy->rx_pwr_offset);
240 if ((sprom->boardflags_lo & B43_BFL_FEM) && 240 if ((sprom->boardflags_lo & B43_BFL_FEM) &&
241 ((b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) || 241 ((b43_current_band(dev->wl) == NL80211_BAND_5GHZ) ||
242 (sprom->boardflags_hi & B43_BFH_PAREF))) { 242 (sprom->boardflags_hi & B43_BFH_PAREF))) {
243 ssb_pmu_set_ldo_voltage(&bus->chipco, LDO_PAREF, 0x28); 243 ssb_pmu_set_ldo_voltage(&bus->chipco, LDO_PAREF, 0x28);
244 ssb_pmu_set_ldo_paref(&bus->chipco, true); 244 ssb_pmu_set_ldo_paref(&bus->chipco, true);
@@ -280,7 +280,7 @@ static void lpphy_baseband_rev0_1_init(struct b43_wldev *dev)
280 b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_7, 0xC0FF, 0x0900); 280 b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_7, 0xC0FF, 0x0900);
281 b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_8, 0xFFC0, 0x000A); 281 b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_8, 0xFFC0, 0x000A);
282 b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_8, 0xC0FF, 0x0B00); 282 b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_8, 0xC0FF, 0x0B00);
283 } else if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ || 283 } else if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ ||
284 (dev->dev->board_type == SSB_BOARD_BU4312) || 284 (dev->dev->board_type == SSB_BOARD_BU4312) ||
285 (dev->phy.rev == 0 && (sprom->boardflags_lo & B43_BFL_FEM))) { 285 (dev->phy.rev == 0 && (sprom->boardflags_lo & B43_BFL_FEM))) {
286 b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xFFC0, 0x0001); 286 b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xFFC0, 0x0001);
@@ -326,7 +326,7 @@ static void lpphy_baseband_rev0_1_init(struct b43_wldev *dev)
326 //FIXME the Broadcom driver caches & delays this HF write! 326 //FIXME the Broadcom driver caches & delays this HF write!
327 b43_hf_write(dev, b43_hf_read(dev) | B43_HF_PR45960W); 327 b43_hf_write(dev, b43_hf_read(dev) | B43_HF_PR45960W);
328 } 328 }
329 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 329 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
330 b43_phy_set(dev, B43_LPPHY_LP_PHY_CTL, 0x8000); 330 b43_phy_set(dev, B43_LPPHY_LP_PHY_CTL, 0x8000);
331 b43_phy_set(dev, B43_LPPHY_CRSGAIN_CTL, 0x0040); 331 b43_phy_set(dev, B43_LPPHY_CRSGAIN_CTL, 0x0040);
332 b43_phy_maskset(dev, B43_LPPHY_MINPWR_LEVEL, 0x00FF, 0xA400); 332 b43_phy_maskset(dev, B43_LPPHY_MINPWR_LEVEL, 0x00FF, 0xA400);
@@ -466,7 +466,7 @@ static void lpphy_baseband_rev2plus_init(struct b43_wldev *dev)
466 b43_lptab_write(dev, B43_LPTAB16(0x08, 0x12), 0x40); 466 b43_lptab_write(dev, B43_LPTAB16(0x08, 0x12), 0x40);
467 } 467 }
468 468
469 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 469 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
470 b43_phy_set(dev, B43_LPPHY_CRSGAIN_CTL, 0x40); 470 b43_phy_set(dev, B43_LPPHY_CRSGAIN_CTL, 0x40);
471 b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xF0FF, 0xB00); 471 b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xF0FF, 0xB00);
472 b43_phy_maskset(dev, B43_LPPHY_SYNCPEAKCNT, 0xFFF8, 0x6); 472 b43_phy_maskset(dev, B43_LPPHY_SYNCPEAKCNT, 0xFFF8, 0x6);
@@ -547,7 +547,7 @@ static void lpphy_2062_init(struct b43_wldev *dev)
547 b43_radio_write(dev, B2062_S_BG_CTL1, 547 b43_radio_write(dev, B2062_S_BG_CTL1,
548 (b43_radio_read(dev, B2062_N_COMM2) >> 1) | 0x80); 548 (b43_radio_read(dev, B2062_N_COMM2) >> 1) | 0x80);
549 } 549 }
550 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) 550 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
551 b43_radio_set(dev, B2062_N_TSSI_CTL0, 0x1); 551 b43_radio_set(dev, B2062_N_TSSI_CTL0, 0x1);
552 else 552 else
553 b43_radio_mask(dev, B2062_N_TSSI_CTL0, ~0x1); 553 b43_radio_mask(dev, B2062_N_TSSI_CTL0, ~0x1);
@@ -746,7 +746,7 @@ static void lpphy_clear_deaf(struct b43_wldev *dev, bool user)
746 lpphy->crs_sys_disable = false; 746 lpphy->crs_sys_disable = false;
747 747
748 if (!lpphy->crs_usr_disable && !lpphy->crs_sys_disable) { 748 if (!lpphy->crs_usr_disable && !lpphy->crs_sys_disable) {
749 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) 749 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
750 b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 750 b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL,
751 0xFF1F, 0x60); 751 0xFF1F, 0x60);
752 else 752 else
@@ -807,7 +807,7 @@ static void lpphy_disable_rx_gain_override(struct b43_wldev *dev)
807 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFBF); 807 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFBF);
808 if (dev->phy.rev >= 2) { 808 if (dev->phy.rev >= 2) {
809 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFEFF); 809 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFEFF);
810 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 810 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
811 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFBFF); 811 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFBFF);
812 b43_phy_mask(dev, B43_PHY_OFDM(0xE5), 0xFFF7); 812 b43_phy_mask(dev, B43_PHY_OFDM(0xE5), 0xFFF7);
813 } 813 }
@@ -823,7 +823,7 @@ static void lpphy_enable_rx_gain_override(struct b43_wldev *dev)
823 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x40); 823 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x40);
824 if (dev->phy.rev >= 2) { 824 if (dev->phy.rev >= 2) {
825 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x100); 825 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x100);
826 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 826 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
827 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x400); 827 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x400);
828 b43_phy_set(dev, B43_PHY_OFDM(0xE5), 0x8); 828 b43_phy_set(dev, B43_PHY_OFDM(0xE5), 0x8);
829 } 829 }
@@ -951,7 +951,7 @@ static void lpphy_rev2plus_set_rx_gain(struct b43_wldev *dev, u32 gain)
951 0xFBFF, ext_lna << 10); 951 0xFBFF, ext_lna << 10);
952 b43_phy_write(dev, B43_LPPHY_RX_GAIN_CTL_OVERRIDE_VAL, low_gain); 952 b43_phy_write(dev, B43_LPPHY_RX_GAIN_CTL_OVERRIDE_VAL, low_gain);
953 b43_phy_maskset(dev, B43_LPPHY_AFE_DDFS, 0xFFF0, high_gain); 953 b43_phy_maskset(dev, B43_LPPHY_AFE_DDFS, 0xFFF0, high_gain);
954 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 954 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
955 tmp = (gain >> 2) & 0x3; 955 tmp = (gain >> 2) & 0x3;
956 b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 956 b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_2_VAL,
957 0xE7FF, tmp<<11); 957 0xE7FF, tmp<<11);
@@ -1344,7 +1344,7 @@ static void lpphy_calibrate_rc(struct b43_wldev *dev)
1344 if (dev->phy.rev >= 2) { 1344 if (dev->phy.rev >= 2) {
1345 lpphy_rev2plus_rc_calib(dev); 1345 lpphy_rev2plus_rc_calib(dev);
1346 } else if (!lpphy->rc_cap) { 1346 } else if (!lpphy->rc_cap) {
1347 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) 1347 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
1348 lpphy_rev0_1_rc_calib(dev); 1348 lpphy_rev0_1_rc_calib(dev);
1349 } else { 1349 } else {
1350 lpphy_set_rc_cap(dev); 1350 lpphy_set_rc_cap(dev);
@@ -1548,7 +1548,7 @@ static void lpphy_tx_pctl_init_sw(struct b43_wldev *dev)
1548{ 1548{
1549 struct lpphy_tx_gains gains; 1549 struct lpphy_tx_gains gains;
1550 1550
1551 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 1551 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
1552 gains.gm = 4; 1552 gains.gm = 4;
1553 gains.pad = 12; 1553 gains.pad = 12;
1554 gains.pga = 12; 1554 gains.pga = 12;
@@ -1902,7 +1902,7 @@ static int lpphy_rx_iq_cal(struct b43_wldev *dev, bool noise, bool tx,
1902 1902
1903 lpphy_set_trsw_over(dev, tx, rx); 1903 lpphy_set_trsw_over(dev, tx, rx);
1904 1904
1905 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 1905 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
1906 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x8); 1906 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x8);
1907 b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 1907 b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_VAL_0,
1908 0xFFF7, pa << 3); 1908 0xFFF7, pa << 3);
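
Note: the band-dependent tweaks in phy_lp.c (and the other PHY files) go through b43_phy_set(), b43_phy_mask() and b43_phy_maskset(), which are read-modify-write wrappers around the PHY register accessors; the real helpers in phy_common.c also route through the per-PHY ops and sanity checks. A toy model of what the three calls compute, with made-up names and a plain array standing in for the 16-bit register space:

#include <stdint.h>

/* Toy register file standing in for the PHY register space. */
static uint16_t regs[0x1000];

static uint16_t phy_read(uint16_t offset) { return regs[offset]; }
static void phy_write(uint16_t offset, uint16_t value) { regs[offset] = value; }

/* What the three helpers reduce to, conceptually:
 *   set:     OR bits in                            (b43_phy_set)
 *   mask:    AND bits out                          (b43_phy_mask)
 *   maskset: clear a field, then OR in a new value (b43_phy_maskset) */
static void phy_set(uint16_t offset, uint16_t set)
{
	phy_write(offset, phy_read(offset) | set);
}

static void phy_mask(uint16_t offset, uint16_t mask)
{
	phy_write(offset, phy_read(offset) & mask);
}

static void phy_maskset(uint16_t offset, uint16_t mask, uint16_t set)
{
	phy_write(offset, (phy_read(offset) & mask) | set);
}

int main(void)
{
	regs[0x10] = 0xABCD;
	phy_maskset(0x10, 0xFF00, 0x0042);	/* keep high byte, set low byte */
	phy_set(0x11, 0x0004);			/* OR a bit in */
	phy_mask(0x11, (uint16_t)~0x0004);	/* AND it back out */
	return (regs[0x10] == 0xAB42 && regs[0x11] == 0) ? 0 : 1;
}
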
diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c
index 9f0bcf3b8414..a5557d70689f 100644
--- a/drivers/net/wireless/broadcom/b43/phy_n.c
+++ b/drivers/net/wireless/broadcom/b43/phy_n.c
@@ -105,9 +105,9 @@ enum n_rail_type {
105 105
106static inline bool b43_nphy_ipa(struct b43_wldev *dev) 106static inline bool b43_nphy_ipa(struct b43_wldev *dev)
107{ 107{
108 enum ieee80211_band band = b43_current_band(dev->wl); 108 enum nl80211_band band = b43_current_band(dev->wl);
109 return ((dev->phy.n->ipa2g_on && band == IEEE80211_BAND_2GHZ) || 109 return ((dev->phy.n->ipa2g_on && band == NL80211_BAND_2GHZ) ||
110 (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ)); 110 (dev->phy.n->ipa5g_on && band == NL80211_BAND_5GHZ));
111} 111}
112 112
113/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCoreGetState */ 113/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCoreGetState */
@@ -357,7 +357,7 @@ static void b43_nphy_rf_ctl_intc_override_rev7(struct b43_wldev *dev,
357 break; 357 break;
358 case N_INTC_OVERRIDE_PA: 358 case N_INTC_OVERRIDE_PA:
359 tmp = 0x0030; 359 tmp = 0x0030;
360 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) 360 if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
361 val = value << 5; 361 val = value << 5;
362 else 362 else
363 val = value << 4; 363 val = value << 4;
@@ -365,7 +365,7 @@ static void b43_nphy_rf_ctl_intc_override_rev7(struct b43_wldev *dev,
365 b43_phy_set(dev, reg, 0x1000); 365 b43_phy_set(dev, reg, 0x1000);
366 break; 366 break;
367 case N_INTC_OVERRIDE_EXT_LNA_PU: 367 case N_INTC_OVERRIDE_EXT_LNA_PU:
368 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { 368 if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
369 tmp = 0x0001; 369 tmp = 0x0001;
370 tmp2 = 0x0004; 370 tmp2 = 0x0004;
371 val = value; 371 val = value;
@@ -378,7 +378,7 @@ static void b43_nphy_rf_ctl_intc_override_rev7(struct b43_wldev *dev,
378 b43_phy_mask(dev, reg, ~tmp2); 378 b43_phy_mask(dev, reg, ~tmp2);
379 break; 379 break;
380 case N_INTC_OVERRIDE_EXT_LNA_GAIN: 380 case N_INTC_OVERRIDE_EXT_LNA_GAIN:
381 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { 381 if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
382 tmp = 0x0002; 382 tmp = 0x0002;
383 tmp2 = 0x0008; 383 tmp2 = 0x0008;
384 val = value << 1; 384 val = value << 1;
@@ -465,7 +465,7 @@ static void b43_nphy_rf_ctl_intc_override(struct b43_wldev *dev,
465 } 465 }
466 break; 466 break;
467 case N_INTC_OVERRIDE_PA: 467 case N_INTC_OVERRIDE_PA:
468 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { 468 if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
469 tmp = 0x0020; 469 tmp = 0x0020;
470 val = value << 5; 470 val = value << 5;
471 } else { 471 } else {
@@ -475,7 +475,7 @@ static void b43_nphy_rf_ctl_intc_override(struct b43_wldev *dev,
475 b43_phy_maskset(dev, reg, ~tmp, val); 475 b43_phy_maskset(dev, reg, ~tmp, val);
476 break; 476 break;
477 case N_INTC_OVERRIDE_EXT_LNA_PU: 477 case N_INTC_OVERRIDE_EXT_LNA_PU:
478 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { 478 if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
479 tmp = 0x0001; 479 tmp = 0x0001;
480 val = value; 480 val = value;
481 } else { 481 } else {
@@ -485,7 +485,7 @@ static void b43_nphy_rf_ctl_intc_override(struct b43_wldev *dev,
485 b43_phy_maskset(dev, reg, ~tmp, val); 485 b43_phy_maskset(dev, reg, ~tmp, val);
486 break; 486 break;
487 case N_INTC_OVERRIDE_EXT_LNA_GAIN: 487 case N_INTC_OVERRIDE_EXT_LNA_GAIN:
488 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { 488 if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
489 tmp = 0x0002; 489 tmp = 0x0002;
490 val = value << 1; 490 val = value << 1;
491 } else { 491 } else {
@@ -600,7 +600,7 @@ static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
600 b43_nphy_stay_in_carrier_search(dev, 1); 600 b43_nphy_stay_in_carrier_search(dev, 1);
601 601
602 if (nphy->gain_boost) { 602 if (nphy->gain_boost) {
603 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 603 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
604 gain[0] = 6; 604 gain[0] = 6;
605 gain[1] = 6; 605 gain[1] = 6;
606 } else { 606 } else {
@@ -736,7 +736,7 @@ static void b43_radio_2057_setup(struct b43_wldev *dev,
736 switch (phy->radio_rev) { 736 switch (phy->radio_rev) {
737 case 0 ... 4: 737 case 0 ... 4:
738 case 6: 738 case 6:
739 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 739 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
740 b43_radio_write(dev, R2057_RFPLL_LOOPFILTER_R1, 0x3f); 740 b43_radio_write(dev, R2057_RFPLL_LOOPFILTER_R1, 0x3f);
741 b43_radio_write(dev, R2057_CP_KPD_IDAC, 0x3f); 741 b43_radio_write(dev, R2057_CP_KPD_IDAC, 0x3f);
742 b43_radio_write(dev, R2057_RFPLL_LOOPFILTER_C1, 0x8); 742 b43_radio_write(dev, R2057_RFPLL_LOOPFILTER_C1, 0x8);
@@ -751,7 +751,7 @@ static void b43_radio_2057_setup(struct b43_wldev *dev,
751 case 9: /* e.g. PHY rev 16 */ 751 case 9: /* e.g. PHY rev 16 */
752 b43_radio_write(dev, R2057_LOGEN_PTAT_RESETS, 0x20); 752 b43_radio_write(dev, R2057_LOGEN_PTAT_RESETS, 0x20);
753 b43_radio_write(dev, R2057_VCOBUF_IDACS, 0x18); 753 b43_radio_write(dev, R2057_VCOBUF_IDACS, 0x18);
754 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { 754 if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
755 b43_radio_write(dev, R2057_LOGEN_PTAT_RESETS, 0x38); 755 b43_radio_write(dev, R2057_LOGEN_PTAT_RESETS, 0x38);
756 b43_radio_write(dev, R2057_VCOBUF_IDACS, 0x0f); 756 b43_radio_write(dev, R2057_VCOBUF_IDACS, 0x0f);
757 757
@@ -775,7 +775,7 @@ static void b43_radio_2057_setup(struct b43_wldev *dev,
775 break; 775 break;
776 } 776 }
777 777
778 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 778 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
779 u16 txmix2g_tune_boost_pu = 0; 779 u16 txmix2g_tune_boost_pu = 0;
780 u16 pad2g_tune_pus = 0; 780 u16 pad2g_tune_pus = 0;
781 781
@@ -1135,7 +1135,7 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
1135{ 1135{
1136 struct b43_phy *phy = &dev->phy; 1136 struct b43_phy *phy = &dev->phy;
1137 struct ssb_sprom *sprom = dev->dev->bus_sprom; 1137 struct ssb_sprom *sprom = dev->dev->bus_sprom;
1138 enum ieee80211_band band = b43_current_band(dev->wl); 1138 enum nl80211_band band = b43_current_band(dev->wl);
1139 u16 offset; 1139 u16 offset;
1140 u8 i; 1140 u8 i;
1141 u16 bias, cbias; 1141 u16 bias, cbias;
@@ -1152,10 +1152,10 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
1152 dev->dev->chip_pkg == BCMA_PKG_ID_BCM43224_FAB_SMIC); 1152 dev->dev->chip_pkg == BCMA_PKG_ID_BCM43224_FAB_SMIC);
1153 1153
1154 b43_chantab_radio_2056_upload(dev, e); 1154 b43_chantab_radio_2056_upload(dev, e);
1155 b2056_upload_syn_pll_cp2(dev, band == IEEE80211_BAND_5GHZ); 1155 b2056_upload_syn_pll_cp2(dev, band == NL80211_BAND_5GHZ);
1156 1156
1157 if (sprom->boardflags2_lo & B43_BFL2_GPLL_WAR && 1157 if (sprom->boardflags2_lo & B43_BFL2_GPLL_WAR &&
1158 b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 1158 b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
1159 b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F); 1159 b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
1160 b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F); 1160 b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F);
1161 if (dev->dev->chip_id == BCMA_CHIP_ID_BCM4716 || 1161 if (dev->dev->chip_id == BCMA_CHIP_ID_BCM4716 ||
@@ -1168,21 +1168,21 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
1168 } 1168 }
1169 } 1169 }
1170 if (sprom->boardflags2_hi & B43_BFH2_GPLL_WAR2 && 1170 if (sprom->boardflags2_hi & B43_BFH2_GPLL_WAR2 &&
1171 b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 1171 b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
1172 b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1f); 1172 b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1f);
1173 b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1f); 1173 b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1f);
1174 b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x0b); 1174 b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x0b);
1175 b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x20); 1175 b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x20);
1176 } 1176 }
1177 if (sprom->boardflags2_lo & B43_BFL2_APLL_WAR && 1177 if (sprom->boardflags2_lo & B43_BFL2_APLL_WAR &&
1178 b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { 1178 b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
1179 b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F); 1179 b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
1180 b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F); 1180 b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F);
1181 b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x05); 1181 b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x05);
1182 b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x0C); 1182 b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x0C);
1183 } 1183 }
1184 1184
1185 if (dev->phy.n->ipa2g_on && band == IEEE80211_BAND_2GHZ) { 1185 if (dev->phy.n->ipa2g_on && band == NL80211_BAND_2GHZ) {
1186 for (i = 0; i < 2; i++) { 1186 for (i = 0; i < 2; i++) {
1187 offset = i ? B2056_TX1 : B2056_TX0; 1187 offset = i ? B2056_TX1 : B2056_TX0;
1188 if (dev->phy.rev >= 5) { 1188 if (dev->phy.rev >= 5) {
@@ -1244,7 +1244,7 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
1244 } 1244 }
1245 b43_radio_write(dev, offset | B2056_TX_PA_SPARE1, 0xee); 1245 b43_radio_write(dev, offset | B2056_TX_PA_SPARE1, 0xee);
1246 } 1246 }
1247 } else if (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ) { 1247 } else if (dev->phy.n->ipa5g_on && band == NL80211_BAND_5GHZ) {
1248 u16 freq = phy->chandef->chan->center_freq; 1248 u16 freq = phy->chandef->chan->center_freq;
1249 if (freq < 5100) { 1249 if (freq < 5100) {
1250 paa_boost = 0xA; 1250 paa_boost = 0xA;
@@ -1501,7 +1501,7 @@ static void b43_radio_init2055(struct b43_wldev *dev)
1501 /* Follow wl, not specs. Do not force uploading all regs */ 1501 /* Follow wl, not specs. Do not force uploading all regs */
1502 b2055_upload_inittab(dev, 0, 0); 1502 b2055_upload_inittab(dev, 0, 0);
1503 } else { 1503 } else {
1504 bool ghz5 = b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ; 1504 bool ghz5 = b43_current_band(dev->wl) == NL80211_BAND_5GHZ;
1505 b2055_upload_inittab(dev, ghz5, 0); 1505 b2055_upload_inittab(dev, ghz5, 0);
1506 } 1506 }
1507 b43_radio_init2055_post(dev); 1507 b43_radio_init2055_post(dev);
@@ -1785,7 +1785,7 @@ static void b43_nphy_rev3_rssi_select(struct b43_wldev *dev, u8 code,
1785 b43_phy_maskset(dev, reg, 0xFFC3, 0); 1785 b43_phy_maskset(dev, reg, 0xFFC3, 0);
1786 1786
1787 if (rssi_type == N_RSSI_W1) 1787 if (rssi_type == N_RSSI_W1)
1788 val = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ? 4 : 8; 1788 val = (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) ? 4 : 8;
1789 else if (rssi_type == N_RSSI_W2) 1789 else if (rssi_type == N_RSSI_W2)
1790 val = 16; 1790 val = 16;
1791 else 1791 else
@@ -1813,12 +1813,12 @@ static void b43_nphy_rev3_rssi_select(struct b43_wldev *dev, u8 code,
1813 1813
1814 if (rssi_type != N_RSSI_IQ && 1814 if (rssi_type != N_RSSI_IQ &&
1815 rssi_type != N_RSSI_TBD) { 1815 rssi_type != N_RSSI_TBD) {
1816 enum ieee80211_band band = 1816 enum nl80211_band band =
1817 b43_current_band(dev->wl); 1817 b43_current_band(dev->wl);
1818 1818
1819 if (dev->phy.rev < 7) { 1819 if (dev->phy.rev < 7) {
1820 if (b43_nphy_ipa(dev)) 1820 if (b43_nphy_ipa(dev))
1821 val = (band == IEEE80211_BAND_5GHZ) ? 0xC : 0xE; 1821 val = (band == NL80211_BAND_5GHZ) ? 0xC : 0xE;
1822 else 1822 else
1823 val = 0x11; 1823 val = 0x11;
1824 reg = (i == 0) ? B2056_TX0 : B2056_TX1; 1824 reg = (i == 0) ? B2056_TX0 : B2056_TX1;
@@ -2120,7 +2120,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
2120 1, 0, false); 2120 1, 0, false);
2121 b43_nphy_rf_ctl_override_rev7(dev, 0x80, 1, 0, false, 0); 2121 b43_nphy_rf_ctl_override_rev7(dev, 0x80, 1, 0, false, 0);
2122 b43_nphy_rf_ctl_override_rev7(dev, 0x40, 1, 0, false, 0); 2122 b43_nphy_rf_ctl_override_rev7(dev, 0x40, 1, 0, false, 0);
2123 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { 2123 if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
2124 b43_nphy_rf_ctl_override_rev7(dev, 0x20, 0, 0, false, 2124 b43_nphy_rf_ctl_override_rev7(dev, 0x20, 0, 0, false,
2125 0); 2125 0);
2126 b43_nphy_rf_ctl_override_rev7(dev, 0x10, 1, 0, false, 2126 b43_nphy_rf_ctl_override_rev7(dev, 0x10, 1, 0, false,
@@ -2136,7 +2136,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
2136 b43_nphy_rf_ctl_override(dev, 0x2, 1, 0, false); 2136 b43_nphy_rf_ctl_override(dev, 0x2, 1, 0, false);
2137 b43_nphy_rf_ctl_override(dev, 0x80, 1, 0, false); 2137 b43_nphy_rf_ctl_override(dev, 0x80, 1, 0, false);
2138 b43_nphy_rf_ctl_override(dev, 0x40, 1, 0, false); 2138 b43_nphy_rf_ctl_override(dev, 0x40, 1, 0, false);
2139 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { 2139 if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
2140 b43_nphy_rf_ctl_override(dev, 0x20, 0, 0, false); 2140 b43_nphy_rf_ctl_override(dev, 0x20, 0, 0, false);
2141 b43_nphy_rf_ctl_override(dev, 0x10, 1, 0, false); 2141 b43_nphy_rf_ctl_override(dev, 0x10, 1, 0, false);
2142 } else { 2142 } else {
@@ -2257,7 +2257,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
2257 b43_phy_write(dev, regs_to_store[i], saved_regs_phy[i]); 2257 b43_phy_write(dev, regs_to_store[i], saved_regs_phy[i]);
2258 2258
2259 /* Store for future configuration */ 2259 /* Store for future configuration */
2260 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 2260 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
2261 rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_2G; 2261 rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_2G;
2262 rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_2G; 2262 rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_2G;
2263 } else { 2263 } else {
@@ -2289,7 +2289,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
2289 rssical_phy_regs[11] = b43_phy_read(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y); 2289 rssical_phy_regs[11] = b43_phy_read(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y);
2290 2290
2291 /* Remember for which channel we store configuration */ 2291 /* Remember for which channel we store configuration */
2292 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) 2292 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
2293 nphy->rssical_chanspec_2G.center_freq = phy->chandef->chan->center_freq; 2293 nphy->rssical_chanspec_2G.center_freq = phy->chandef->chan->center_freq;
2294 else 2294 else
2295 nphy->rssical_chanspec_5G.center_freq = phy->chandef->chan->center_freq; 2295 nphy->rssical_chanspec_5G.center_freq = phy->chandef->chan->center_freq;
@@ -2336,7 +2336,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, enum n_rssi_type type)
2336 b43_nphy_read_clip_detection(dev, clip_state); 2336 b43_nphy_read_clip_detection(dev, clip_state);
2337 b43_nphy_write_clip_detection(dev, clip_off); 2337 b43_nphy_write_clip_detection(dev, clip_off);
2338 2338
2339 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) 2339 if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
2340 override = 0x140; 2340 override = 0x140;
2341 else 2341 else
2342 override = 0x110; 2342 override = 0x110;
@@ -2629,7 +2629,7 @@ static void b43_nphy_gain_ctl_workarounds_rev1_2(struct b43_wldev *dev)
2629 b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C); 2629 b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C);
2630 2630
2631 if (nphy->gain_boost) { 2631 if (nphy->gain_boost) {
2632 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ && 2632 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ &&
2633 b43_is_40mhz(dev)) 2633 b43_is_40mhz(dev))
2634 code = 4; 2634 code = 4;
2635 else 2635 else
@@ -2688,7 +2688,7 @@ static void b43_nphy_gain_ctl_workarounds_rev1_2(struct b43_wldev *dev)
2688 ~B43_NPHY_OVER_DGAIN_CCKDGECV & 0xFFFF, 2688 ~B43_NPHY_OVER_DGAIN_CCKDGECV & 0xFFFF,
2689 0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT); 2689 0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT);
2690 2690
2691 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) 2691 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
2692 b43_phy_maskset(dev, B43_PHY_N(0xC5D), 0xFF80, 4); 2692 b43_phy_maskset(dev, B43_PHY_N(0xC5D), 0xFF80, 4);
2693} 2693}
2694 2694
@@ -2803,7 +2803,7 @@ static void b43_nphy_workarounds_rev7plus(struct b43_wldev *dev)
2803 scap_val = b43_radio_read(dev, R2057_RCCAL_SCAP_VAL); 2803 scap_val = b43_radio_read(dev, R2057_RCCAL_SCAP_VAL);
2804 2804
2805 if (b43_nphy_ipa(dev)) { 2805 if (b43_nphy_ipa(dev)) {
2806 bool ghz2 = b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ; 2806 bool ghz2 = b43_current_band(dev->wl) == NL80211_BAND_2GHZ;
2807 2807
2808 switch (phy->radio_rev) { 2808 switch (phy->radio_rev) {
2809 case 5: 2809 case 5:
@@ -2831,7 +2831,7 @@ static void b43_nphy_workarounds_rev7plus(struct b43_wldev *dev)
2831 bcap_val_11b[core] = bcap_val; 2831 bcap_val_11b[core] = bcap_val;
2832 lpf_ofdm_20mhz[core] = 4; 2832 lpf_ofdm_20mhz[core] = 4;
2833 lpf_11b[core] = 1; 2833 lpf_11b[core] = 1;
2834 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 2834 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
2835 scap_val_11n_20[core] = 0xc; 2835 scap_val_11n_20[core] = 0xc;
2836 bcap_val_11n_20[core] = 0xc; 2836 bcap_val_11n_20[core] = 0xc;
2837 scap_val_11n_40[core] = 0xa; 2837 scap_val_11n_40[core] = 0xa;
@@ -2982,7 +2982,7 @@ static void b43_nphy_workarounds_rev7plus(struct b43_wldev *dev)
2982 conv = 0x7f; 2982 conv = 0x7f;
2983 filt = 0xee; 2983 filt = 0xee;
2984 } 2984 }
2985 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 2985 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
2986 for (core = 0; core < 2; core++) { 2986 for (core = 0; core < 2; core++) {
2987 if (core == 0) { 2987 if (core == 0) {
2988 b43_radio_write(dev, 0x5F, bias); 2988 b43_radio_write(dev, 0x5F, bias);
@@ -2998,7 +2998,7 @@ static void b43_nphy_workarounds_rev7plus(struct b43_wldev *dev)
2998 } 2998 }
2999 2999
3000 if (b43_nphy_ipa(dev)) { 3000 if (b43_nphy_ipa(dev)) {
3001 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 3001 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
3002 if (phy->radio_rev == 3 || phy->radio_rev == 4 || 3002 if (phy->radio_rev == 3 || phy->radio_rev == 4 ||
3003 phy->radio_rev == 6) { 3003 phy->radio_rev == 6) {
3004 for (core = 0; core < 2; core++) { 3004 for (core = 0; core < 2; core++) {
@@ -3221,7 +3221,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
3221 ARRAY_SIZE(rx2tx_events)); 3221 ARRAY_SIZE(rx2tx_events));
3222 } 3222 }
3223 3223
3224 tmp16 = (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ? 3224 tmp16 = (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) ?
3225 0x2 : 0x9C40; 3225 0x2 : 0x9C40;
3226 b43_phy_write(dev, B43_NPHY_ENDROP_TLEN, tmp16); 3226 b43_phy_write(dev, B43_NPHY_ENDROP_TLEN, tmp16);
3227 3227
@@ -3240,7 +3240,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
3240 b43_ntab_write(dev, B43_NTAB16(8, 0), 2); 3240 b43_ntab_write(dev, B43_NTAB16(8, 0), 2);
3241 b43_ntab_write(dev, B43_NTAB16(8, 16), 2); 3241 b43_ntab_write(dev, B43_NTAB16(8, 16), 2);
3242 3242
3243 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) 3243 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
3244 pdet_range = sprom->fem.ghz2.pdet_range; 3244 pdet_range = sprom->fem.ghz2.pdet_range;
3245 else 3245 else
3246 pdet_range = sprom->fem.ghz5.pdet_range; 3246 pdet_range = sprom->fem.ghz5.pdet_range;
@@ -3249,7 +3249,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
3249 switch (pdet_range) { 3249 switch (pdet_range) {
3250 case 3: 3250 case 3:
3251 if (!(dev->phy.rev >= 4 && 3251 if (!(dev->phy.rev >= 4 &&
3252 b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)) 3252 b43_current_band(dev->wl) == NL80211_BAND_2GHZ))
3253 break; 3253 break;
3254 /* FALL THROUGH */ 3254 /* FALL THROUGH */
3255 case 0: 3255 case 0:
@@ -3261,7 +3261,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
3261 break; 3261 break;
3262 case 2: 3262 case 2:
3263 if (dev->phy.rev >= 6) { 3263 if (dev->phy.rev >= 6) {
3264 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) 3264 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
3265 vmid[3] = 0x94; 3265 vmid[3] = 0x94;
3266 else 3266 else
3267 vmid[3] = 0x8e; 3267 vmid[3] = 0x8e;
@@ -3277,7 +3277,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
3277 break; 3277 break;
3278 case 4: 3278 case 4:
3279 case 5: 3279 case 5:
3280 if (b43_current_band(dev->wl) != IEEE80211_BAND_2GHZ) { 3280 if (b43_current_band(dev->wl) != NL80211_BAND_2GHZ) {
3281 if (pdet_range == 4) { 3281 if (pdet_range == 4) {
3282 vmid[3] = 0x8e; 3282 vmid[3] = 0x8e;
3283 tmp16 = 0x96; 3283 tmp16 = 0x96;
@@ -3322,9 +3322,9 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
3322 /* N PHY WAR TX Chain Update with hw_phytxchain as argument */ 3322 /* N PHY WAR TX Chain Update with hw_phytxchain as argument */
3323 3323
3324 if ((sprom->boardflags2_lo & B43_BFL2_APLL_WAR && 3324 if ((sprom->boardflags2_lo & B43_BFL2_APLL_WAR &&
3325 b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) || 3325 b43_current_band(dev->wl) == NL80211_BAND_5GHZ) ||
3326 (sprom->boardflags2_lo & B43_BFL2_GPLL_WAR && 3326 (sprom->boardflags2_lo & B43_BFL2_GPLL_WAR &&
3327 b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)) 3327 b43_current_band(dev->wl) == NL80211_BAND_2GHZ))
3328 tmp32 = 0x00088888; 3328 tmp32 = 0x00088888;
3329 else 3329 else
3330 tmp32 = 0x88888888; 3330 tmp32 = 0x88888888;
@@ -3333,7 +3333,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
3333 b43_ntab_write(dev, B43_NTAB32(30, 3), tmp32); 3333 b43_ntab_write(dev, B43_NTAB32(30, 3), tmp32);
3334 3334
3335 if (dev->phy.rev == 4 && 3335 if (dev->phy.rev == 4 &&
3336 b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { 3336 b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
3337 b43_radio_write(dev, B2056_TX0 | B2056_TX_GMBB_IDAC, 3337 b43_radio_write(dev, B2056_TX0 | B2056_TX_GMBB_IDAC,
3338 0x70); 3338 0x70);
3339 b43_radio_write(dev, B2056_TX1 | B2056_TX_GMBB_IDAC, 3339 b43_radio_write(dev, B2056_TX1 | B2056_TX_GMBB_IDAC,
@@ -3376,7 +3376,7 @@ static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
3376 delays1[5] = 0x14; 3376 delays1[5] = 0x14;
3377 } 3377 }
3378 3378
3379 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ && 3379 if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ &&
3380 nphy->band5g_pwrgain) { 3380 nphy->band5g_pwrgain) {
3381 b43_radio_mask(dev, B2055_C1_TX_RF_SPARE, ~0x8); 3381 b43_radio_mask(dev, B2055_C1_TX_RF_SPARE, ~0x8);
3382 b43_radio_mask(dev, B2055_C2_TX_RF_SPARE, ~0x8); 3382 b43_radio_mask(dev, B2055_C2_TX_RF_SPARE, ~0x8);
@@ -3451,7 +3451,7 @@ static void b43_nphy_workarounds(struct b43_wldev *dev)
3451 struct b43_phy *phy = &dev->phy; 3451 struct b43_phy *phy = &dev->phy;
3452 struct b43_phy_n *nphy = phy->n; 3452 struct b43_phy_n *nphy = phy->n;
3453 3453
3454 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) 3454 if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
3455 b43_nphy_classifier(dev, 1, 0); 3455 b43_nphy_classifier(dev, 1, 0);
3456 else 3456 else
3457 b43_nphy_classifier(dev, 1, 1); 3457 b43_nphy_classifier(dev, 1, 1);
@@ -3586,7 +3586,7 @@ static void b43_nphy_iq_cal_gain_params(struct b43_wldev *dev, u16 core,
3586 gain = (target.pad[core]) | (target.pga[core] << 4) | 3586 gain = (target.pad[core]) | (target.pga[core] << 4) |
3587 (target.txgm[core] << 8); 3587 (target.txgm[core] << 8);
3588 3588
3589 indx = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ? 3589 indx = (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) ?
3590 1 : 0; 3590 1 : 0;
3591 for (i = 0; i < 9; i++) 3591 for (i = 0; i < 9; i++)
3592 if (tbl_iqcal_gainparams[indx][i][0] == gain) 3592 if (tbl_iqcal_gainparams[indx][i][0] == gain)
@@ -3614,7 +3614,7 @@ static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable)
3614 struct b43_phy_n *nphy = dev->phy.n; 3614 struct b43_phy_n *nphy = dev->phy.n;
3615 u8 i; 3615 u8 i;
3616 u16 bmask, val, tmp; 3616 u16 bmask, val, tmp;
3617 enum ieee80211_band band = b43_current_band(dev->wl); 3617 enum nl80211_band band = b43_current_band(dev->wl);
3618 3618
3619 if (nphy->hang_avoid) 3619 if (nphy->hang_avoid)
3620 b43_nphy_stay_in_carrier_search(dev, 1); 3620 b43_nphy_stay_in_carrier_search(dev, 1);
@@ -3679,7 +3679,7 @@ static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable)
3679 } 3679 }
3680 b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD, ~(bmask), val); 3680 b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD, ~(bmask), val);
3681 3681
3682 if (band == IEEE80211_BAND_5GHZ) { 3682 if (band == NL80211_BAND_5GHZ) {
3683 if (phy->rev >= 19) { 3683 if (phy->rev >= 19) {
3684 /* TODO */ 3684 /* TODO */
3685 } else if (phy->rev >= 7) { 3685 } else if (phy->rev >= 7) {
@@ -3770,7 +3770,7 @@ static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
3770 txpi[0] = 72; 3770 txpi[0] = 72;
3771 txpi[1] = 72; 3771 txpi[1] = 72;
3772 } else { 3772 } else {
3773 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 3773 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
3774 txpi[0] = sprom->txpid2g[0]; 3774 txpi[0] = sprom->txpid2g[0];
3775 txpi[1] = sprom->txpid2g[1]; 3775 txpi[1] = sprom->txpid2g[1];
3776 } else if (freq >= 4900 && freq < 5100) { 3776 } else if (freq >= 4900 && freq < 5100) {
@@ -3868,7 +3868,7 @@ static void b43_nphy_ipa_internal_tssi_setup(struct b43_wldev *dev)
3868 } else if (phy->rev >= 7) { 3868 } else if (phy->rev >= 7) {
3869 for (core = 0; core < 2; core++) { 3869 for (core = 0; core < 2; core++) {
3870 r = core ? 0x190 : 0x170; 3870 r = core ? 0x190 : 0x170;
3871 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 3871 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
3872 b43_radio_write(dev, r + 0x5, 0x5); 3872 b43_radio_write(dev, r + 0x5, 0x5);
3873 b43_radio_write(dev, r + 0x9, 0xE); 3873 b43_radio_write(dev, r + 0x9, 0xE);
3874 if (phy->rev != 5) 3874 if (phy->rev != 5)
@@ -3892,7 +3892,7 @@ static void b43_nphy_ipa_internal_tssi_setup(struct b43_wldev *dev)
3892 b43_radio_write(dev, r + 0xC, 0); 3892 b43_radio_write(dev, r + 0xC, 0);
3893 } 3893 }
3894 } else { 3894 } else {
3895 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) 3895 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
3896 b43_radio_write(dev, B2056_SYN_RESERVED_ADDR31, 0x128); 3896 b43_radio_write(dev, B2056_SYN_RESERVED_ADDR31, 0x128);
3897 else 3897 else
3898 b43_radio_write(dev, B2056_SYN_RESERVED_ADDR31, 0x80); 3898 b43_radio_write(dev, B2056_SYN_RESERVED_ADDR31, 0x80);
@@ -3909,7 +3909,7 @@ static void b43_nphy_ipa_internal_tssi_setup(struct b43_wldev *dev)
3909 b43_radio_write(dev, r | B2056_TX_TSSI_MISC1, 8); 3909 b43_radio_write(dev, r | B2056_TX_TSSI_MISC1, 8);
3910 b43_radio_write(dev, r | B2056_TX_TSSI_MISC2, 0); 3910 b43_radio_write(dev, r | B2056_TX_TSSI_MISC2, 0);
3911 b43_radio_write(dev, r | B2056_TX_TSSI_MISC3, 0); 3911 b43_radio_write(dev, r | B2056_TX_TSSI_MISC3, 0);
3912 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 3912 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
3913 b43_radio_write(dev, r | B2056_TX_TX_SSI_MASTER, 3913 b43_radio_write(dev, r | B2056_TX_TX_SSI_MASTER,
3914 0x5); 3914 0x5);
3915 if (phy->rev != 5) 3915 if (phy->rev != 5)
@@ -4098,7 +4098,7 @@ static void b43_nphy_tx_power_ctl_setup(struct b43_wldev *dev)
4098 b0[0] = b0[1] = 5612; 4098 b0[0] = b0[1] = 5612;
4099 b1[0] = b1[1] = -1393; 4099 b1[0] = b1[1] = -1393;
4100 } else { 4100 } else {
4101 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 4101 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
4102 for (c = 0; c < 2; c++) { 4102 for (c = 0; c < 2; c++) {
4103 idle[c] = nphy->pwr_ctl_info[c].idle_tssi_2g; 4103 idle[c] = nphy->pwr_ctl_info[c].idle_tssi_2g;
4104 target[c] = sprom->core_pwr_info[c].maxpwr_2g; 4104 target[c] = sprom->core_pwr_info[c].maxpwr_2g;
@@ -4153,11 +4153,11 @@ static void b43_nphy_tx_power_ctl_setup(struct b43_wldev *dev)
4153 for (c = 0; c < 2; c++) { 4153 for (c = 0; c < 2; c++) {
4154 r = c ? 0x190 : 0x170; 4154 r = c ? 0x190 : 0x170;
4155 if (b43_nphy_ipa(dev)) 4155 if (b43_nphy_ipa(dev))
4156 b43_radio_write(dev, r + 0x9, (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ? 0xE : 0xC); 4156 b43_radio_write(dev, r + 0x9, (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) ? 0xE : 0xC);
4157 } 4157 }
4158 } else { 4158 } else {
4159 if (b43_nphy_ipa(dev)) { 4159 if (b43_nphy_ipa(dev)) {
4160 tmp = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ? 0xC : 0xE; 4160 tmp = (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) ? 0xC : 0xE;
4161 b43_radio_write(dev, 4161 b43_radio_write(dev,
4162 B2056_TX0 | B2056_TX_TX_SSI_MUX, tmp); 4162 B2056_TX0 | B2056_TX_TX_SSI_MUX, tmp);
4163 b43_radio_write(dev, 4163 b43_radio_write(dev,
@@ -4267,13 +4267,13 @@ static void b43_nphy_tx_gain_table_upload(struct b43_wldev *dev)
4267 } else if (phy->rev >= 7) { 4267 } else if (phy->rev >= 7) {
4268 pga_gain = (table[i] >> 24) & 0xf; 4268 pga_gain = (table[i] >> 24) & 0xf;
4269 pad_gain = (table[i] >> 19) & 0x1f; 4269 pad_gain = (table[i] >> 19) & 0x1f;
4270 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) 4270 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
4271 rfpwr_offset = rf_pwr_offset_table[pad_gain]; 4271 rfpwr_offset = rf_pwr_offset_table[pad_gain];
4272 else 4272 else
4273 rfpwr_offset = rf_pwr_offset_table[pga_gain]; 4273 rfpwr_offset = rf_pwr_offset_table[pga_gain];
4274 } else { 4274 } else {
4275 pga_gain = (table[i] >> 24) & 0xF; 4275 pga_gain = (table[i] >> 24) & 0xF;
4276 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) 4276 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
4277 rfpwr_offset = b43_ntab_papd_pga_gain_delta_ipa_2g[pga_gain]; 4277 rfpwr_offset = b43_ntab_papd_pga_gain_delta_ipa_2g[pga_gain];
4278 else 4278 else
4279 rfpwr_offset = 0; /* FIXME */ 4279 rfpwr_offset = 0; /* FIXME */
@@ -4288,7 +4288,7 @@ static void b43_nphy_tx_gain_table_upload(struct b43_wldev *dev)
4288static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable) 4288static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable)
4289{ 4289{
4290 struct b43_phy_n *nphy = dev->phy.n; 4290 struct b43_phy_n *nphy = dev->phy.n;
4291 enum ieee80211_band band; 4291 enum nl80211_band band;
4292 u16 tmp; 4292 u16 tmp;
4293 4293
4294 if (!enable) { 4294 if (!enable) {
@@ -4300,12 +4300,12 @@ static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable)
4300 if (dev->phy.rev >= 7) { 4300 if (dev->phy.rev >= 7) {
4301 tmp = 0x1480; 4301 tmp = 0x1480;
4302 } else if (dev->phy.rev >= 3) { 4302 } else if (dev->phy.rev >= 3) {
4303 if (band == IEEE80211_BAND_5GHZ) 4303 if (band == NL80211_BAND_5GHZ)
4304 tmp = 0x600; 4304 tmp = 0x600;
4305 else 4305 else
4306 tmp = 0x480; 4306 tmp = 0x480;
4307 } else { 4307 } else {
4308 if (band == IEEE80211_BAND_5GHZ) 4308 if (band == NL80211_BAND_5GHZ)
4309 tmp = 0x180; 4309 tmp = 0x180;
4310 else 4310 else
4311 tmp = 0x120; 4311 tmp = 0x120;
@@ -4734,7 +4734,7 @@ static void b43_nphy_restore_rssi_cal(struct b43_wldev *dev)
4734 u16 *rssical_radio_regs = NULL; 4734 u16 *rssical_radio_regs = NULL;
4735 u16 *rssical_phy_regs = NULL; 4735 u16 *rssical_phy_regs = NULL;
4736 4736
4737 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 4737 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
4738 if (!nphy->rssical_chanspec_2G.center_freq) 4738 if (!nphy->rssical_chanspec_2G.center_freq)
4739 return; 4739 return;
4740 rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_2G; 4740 rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_2G;
@@ -4804,7 +4804,7 @@ static void b43_nphy_tx_cal_radio_setup_rev7(struct b43_wldev *dev)
4804 save[off + 7] = b43_radio_read(dev, r + R2057_TX0_TSSIG); 4804 save[off + 7] = b43_radio_read(dev, r + R2057_TX0_TSSIG);
4805 save[off + 8] = b43_radio_read(dev, r + R2057_TX0_TSSI_MISC1); 4805 save[off + 8] = b43_radio_read(dev, r + R2057_TX0_TSSI_MISC1);
4806 4806
4807 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { 4807 if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
4808 b43_radio_write(dev, r + R2057_TX0_TX_SSI_MASTER, 0xA); 4808 b43_radio_write(dev, r + R2057_TX0_TX_SSI_MASTER, 0xA);
4809 b43_radio_write(dev, r + R2057_TX0_IQCAL_VCM_HG, 0x43); 4809 b43_radio_write(dev, r + R2057_TX0_IQCAL_VCM_HG, 0x43);
4810 b43_radio_write(dev, r + R2057_TX0_IQCAL_IDAC, 0x55); 4810 b43_radio_write(dev, r + R2057_TX0_IQCAL_IDAC, 0x55);
@@ -4864,7 +4864,7 @@ static void b43_nphy_tx_cal_radio_setup(struct b43_wldev *dev)
4864 save[offset + 9] = b43_radio_read(dev, B2055_XOMISC); 4864 save[offset + 9] = b43_radio_read(dev, B2055_XOMISC);
4865 save[offset + 10] = b43_radio_read(dev, B2055_PLL_LFC1); 4865 save[offset + 10] = b43_radio_read(dev, B2055_PLL_LFC1);
4866 4866
4867 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { 4867 if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
4868 b43_radio_write(dev, tmp | B2055_CAL_RVARCTL, 0x0A); 4868 b43_radio_write(dev, tmp | B2055_CAL_RVARCTL, 0x0A);
4869 b43_radio_write(dev, tmp | B2055_CAL_LPOCTL, 0x40); 4869 b43_radio_write(dev, tmp | B2055_CAL_LPOCTL, 0x40);
4870 b43_radio_write(dev, tmp | B2055_CAL_TS, 0x55); 4870 b43_radio_write(dev, tmp | B2055_CAL_TS, 0x55);
@@ -5005,7 +5005,7 @@ static void b43_nphy_int_pa_set_tx_dig_filters(struct b43_wldev *dev)
5005 b43_nphy_pa_set_tx_dig_filter(dev, 0x186, 5005 b43_nphy_pa_set_tx_dig_filter(dev, 0x186,
5006 tbl_tx_filter_coef_rev4[3]); 5006 tbl_tx_filter_coef_rev4[3]);
5007 } else { 5007 } else {
5008 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) 5008 if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
5009 b43_nphy_pa_set_tx_dig_filter(dev, 0x186, 5009 b43_nphy_pa_set_tx_dig_filter(dev, 0x186,
5010 tbl_tx_filter_coef_rev4[5]); 5010 tbl_tx_filter_coef_rev4[5]);
5011 if (dev->phy.channel == 14) 5011 if (dev->phy.channel == 14)
@@ -5185,7 +5185,7 @@ static void b43_nphy_tx_cal_phy_setup(struct b43_wldev *dev)
5185 false, 0); 5185 false, 0);
5186 } else if (phy->rev == 7) { 5186 } else if (phy->rev == 7) {
5187 b43_radio_maskset(dev, R2057_OVR_REG0, 1 << 4, 1 << 4); 5187 b43_radio_maskset(dev, R2057_OVR_REG0, 1 << 4, 1 << 4);
5188 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 5188 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
5189 b43_radio_maskset(dev, R2057_PAD2G_TUNE_PUS_CORE0, ~1, 0); 5189 b43_radio_maskset(dev, R2057_PAD2G_TUNE_PUS_CORE0, ~1, 0);
5190 b43_radio_maskset(dev, R2057_PAD2G_TUNE_PUS_CORE1, ~1, 0); 5190 b43_radio_maskset(dev, R2057_PAD2G_TUNE_PUS_CORE1, ~1, 0);
5191 } else { 5191 } else {
@@ -5210,7 +5210,7 @@ static void b43_nphy_tx_cal_phy_setup(struct b43_wldev *dev)
5210 b43_ntab_write(dev, B43_NTAB16(8, 18), tmp); 5210 b43_ntab_write(dev, B43_NTAB16(8, 18), tmp);
5211 regs[5] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1); 5211 regs[5] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
5212 regs[6] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2); 5212 regs[6] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
5213 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) 5213 if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
5214 tmp = 0x0180; 5214 tmp = 0x0180;
5215 else 5215 else
5216 tmp = 0x0120; 5216 tmp = 0x0120;
@@ -5233,7 +5233,7 @@ static void b43_nphy_save_cal(struct b43_wldev *dev)
5233 if (nphy->hang_avoid) 5233 if (nphy->hang_avoid)
5234 b43_nphy_stay_in_carrier_search(dev, 1); 5234 b43_nphy_stay_in_carrier_search(dev, 1);
5235 5235
5236 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 5236 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
5237 rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_2G; 5237 rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_2G;
5238 txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_2G; 5238 txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_2G;
5239 iqcal_chanspec = &nphy->iqcal_chanspec_2G; 5239 iqcal_chanspec = &nphy->iqcal_chanspec_2G;
@@ -5304,7 +5304,7 @@ static void b43_nphy_restore_cal(struct b43_wldev *dev)
5304 u16 *txcal_radio_regs = NULL; 5304 u16 *txcal_radio_regs = NULL;
5305 struct b43_phy_n_iq_comp *rxcal_coeffs = NULL; 5305 struct b43_phy_n_iq_comp *rxcal_coeffs = NULL;
5306 5306
5307 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 5307 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
5308 if (!nphy->iqcal_chanspec_2G.center_freq) 5308 if (!nphy->iqcal_chanspec_2G.center_freq)
5309 return; 5309 return;
5310 table = nphy->cal_cache.txcal_coeffs_2G; 5310 table = nphy->cal_cache.txcal_coeffs_2G;
@@ -5332,7 +5332,7 @@ static void b43_nphy_restore_cal(struct b43_wldev *dev)
5332 if (dev->phy.rev < 2) 5332 if (dev->phy.rev < 2)
5333 b43_nphy_tx_iq_workaround(dev); 5333 b43_nphy_tx_iq_workaround(dev);
5334 5334
5335 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 5335 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
5336 txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_2G; 5336 txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_2G;
5337 rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_2G; 5337 rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_2G;
5338 } else { 5338 } else {
@@ -5422,7 +5422,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
5422 5422
5423 phy6or5x = dev->phy.rev >= 6 || 5423 phy6or5x = dev->phy.rev >= 6 ||
5424 (dev->phy.rev == 5 && nphy->ipa2g_on && 5424 (dev->phy.rev == 5 && nphy->ipa2g_on &&
5425 b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ); 5425 b43_current_band(dev->wl) == NL80211_BAND_2GHZ);
5426 if (phy6or5x) { 5426 if (phy6or5x) {
5427 if (b43_is_40mhz(dev)) { 5427 if (b43_is_40mhz(dev)) {
5428 b43_ntab_write_bulk(dev, B43_NTAB16(15, 0), 18, 5428 b43_ntab_write_bulk(dev, B43_NTAB16(15, 0), 18,
@@ -5657,7 +5657,7 @@ static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev,
5657 u16 tmp[6]; 5657 u16 tmp[6];
5658 u16 uninitialized_var(cur_hpf1), uninitialized_var(cur_hpf2), cur_lna; 5658 u16 uninitialized_var(cur_hpf1), uninitialized_var(cur_hpf2), cur_lna;
5659 u32 real, imag; 5659 u32 real, imag;
5660 enum ieee80211_band band; 5660 enum nl80211_band band;
5661 5661
5662 u8 use; 5662 u8 use;
5663 u16 cur_hpf; 5663 u16 cur_hpf;
@@ -5712,18 +5712,18 @@ static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev,
5712 band = b43_current_band(dev->wl); 5712 band = b43_current_band(dev->wl);
5713 5713
5714 if (nphy->rxcalparams & 0xFF000000) { 5714 if (nphy->rxcalparams & 0xFF000000) {
5715 if (band == IEEE80211_BAND_5GHZ) 5715 if (band == NL80211_BAND_5GHZ)
5716 b43_phy_write(dev, rfctl[0], 0x140); 5716 b43_phy_write(dev, rfctl[0], 0x140);
5717 else 5717 else
5718 b43_phy_write(dev, rfctl[0], 0x110); 5718 b43_phy_write(dev, rfctl[0], 0x110);
5719 } else { 5719 } else {
5720 if (band == IEEE80211_BAND_5GHZ) 5720 if (band == NL80211_BAND_5GHZ)
5721 b43_phy_write(dev, rfctl[0], 0x180); 5721 b43_phy_write(dev, rfctl[0], 0x180);
5722 else 5722 else
5723 b43_phy_write(dev, rfctl[0], 0x120); 5723 b43_phy_write(dev, rfctl[0], 0x120);
5724 } 5724 }
5725 5725
5726 if (band == IEEE80211_BAND_5GHZ) 5726 if (band == NL80211_BAND_5GHZ)
5727 b43_phy_write(dev, rfctl[1], 0x148); 5727 b43_phy_write(dev, rfctl[1], 0x148);
5728 else 5728 else
5729 b43_phy_write(dev, rfctl[1], 0x114); 5729 b43_phy_write(dev, rfctl[1], 0x114);
@@ -5919,7 +5919,7 @@ static enum b43_txpwr_result b43_nphy_op_recalc_txpower(struct b43_wldev *dev,
5919#if 0 5919#if 0
5920 /* Some extra gains */ 5920 /* Some extra gains */
5921 hw_gain = 6; /* N-PHY specific */ 5921 hw_gain = 6; /* N-PHY specific */
5922 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) 5922 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
5923 hw_gain += sprom->antenna_gain.a0; 5923 hw_gain += sprom->antenna_gain.a0;
5924 else 5924 else
5925 hw_gain += sprom->antenna_gain.a1; 5925 hw_gain += sprom->antenna_gain.a1;
@@ -6043,7 +6043,7 @@ static int b43_phy_initn(struct b43_wldev *dev)
6043 u8 tx_pwr_state; 6043 u8 tx_pwr_state;
6044 struct nphy_txgains target; 6044 struct nphy_txgains target;
6045 u16 tmp; 6045 u16 tmp;
6046 enum ieee80211_band tmp2; 6046 enum nl80211_band tmp2;
6047 bool do_rssi_cal; 6047 bool do_rssi_cal;
6048 6048
6049 u16 clip[2]; 6049 u16 clip[2];
@@ -6051,7 +6051,7 @@ static int b43_phy_initn(struct b43_wldev *dev)
6051 6051
6052 if ((dev->phy.rev >= 3) && 6052 if ((dev->phy.rev >= 3) &&
6053 (sprom->boardflags_lo & B43_BFL_EXTLNA) && 6053 (sprom->boardflags_lo & B43_BFL_EXTLNA) &&
6054 (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)) { 6054 (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)) {
6055 switch (dev->dev->bus_type) { 6055 switch (dev->dev->bus_type) {
6056#ifdef CONFIG_B43_BCMA 6056#ifdef CONFIG_B43_BCMA
6057 case B43_BUS_BCMA: 6057 case B43_BUS_BCMA:
@@ -6170,7 +6170,7 @@ static int b43_phy_initn(struct b43_wldev *dev)
6170 6170
6171 b43_nphy_classifier(dev, 0, 0); 6171 b43_nphy_classifier(dev, 0, 0);
6172 b43_nphy_read_clip_detection(dev, clip); 6172 b43_nphy_read_clip_detection(dev, clip);
6173 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) 6173 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
6174 b43_nphy_bphy_init(dev); 6174 b43_nphy_bphy_init(dev);
6175 6175
6176 tx_pwr_state = nphy->txpwrctrl; 6176 tx_pwr_state = nphy->txpwrctrl;
@@ -6187,7 +6187,7 @@ static int b43_phy_initn(struct b43_wldev *dev)
6187 6187
6188 do_rssi_cal = false; 6188 do_rssi_cal = false;
6189 if (phy->rev >= 3) { 6189 if (phy->rev >= 3) {
6190 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) 6190 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
6191 do_rssi_cal = !nphy->rssical_chanspec_2G.center_freq; 6191 do_rssi_cal = !nphy->rssical_chanspec_2G.center_freq;
6192 else 6192 else
6193 do_rssi_cal = !nphy->rssical_chanspec_5G.center_freq; 6193 do_rssi_cal = !nphy->rssical_chanspec_5G.center_freq;
@@ -6201,7 +6201,7 @@ static int b43_phy_initn(struct b43_wldev *dev)
6201 } 6201 }
6202 6202
6203 if (!((nphy->measure_hold & 0x6) != 0)) { 6203 if (!((nphy->measure_hold & 0x6) != 0)) {
6204 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) 6204 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
6205 do_cal = !nphy->iqcal_chanspec_2G.center_freq; 6205 do_cal = !nphy->iqcal_chanspec_2G.center_freq;
6206 else 6206 else
6207 do_cal = !nphy->iqcal_chanspec_5G.center_freq; 6207 do_cal = !nphy->iqcal_chanspec_5G.center_freq;
@@ -6291,7 +6291,7 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev,
6291 int ch = new_channel->hw_value; 6291 int ch = new_channel->hw_value;
6292 u16 tmp16; 6292 u16 tmp16;
6293 6293
6294 if (new_channel->band == IEEE80211_BAND_5GHZ) { 6294 if (new_channel->band == NL80211_BAND_5GHZ) {
6295 /* Switch to 2 GHz for a moment to access B43_PHY_B_BBCFG */ 6295 /* Switch to 2 GHz for a moment to access B43_PHY_B_BBCFG */
6296 b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ); 6296 b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ);
6297 6297
@@ -6302,7 +6302,7 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev,
6302 B43_PHY_B_BBCFG_RSTCCA | B43_PHY_B_BBCFG_RSTRX); 6302 B43_PHY_B_BBCFG_RSTCCA | B43_PHY_B_BBCFG_RSTRX);
6303 b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16); 6303 b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16);
6304 b43_phy_set(dev, B43_NPHY_BANDCTL, B43_NPHY_BANDCTL_5GHZ); 6304 b43_phy_set(dev, B43_NPHY_BANDCTL, B43_NPHY_BANDCTL_5GHZ);
6305 } else if (new_channel->band == IEEE80211_BAND_2GHZ) { 6305 } else if (new_channel->band == NL80211_BAND_2GHZ) {
6306 b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ); 6306 b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ);
6307 tmp16 = b43_read16(dev, B43_MMIO_PSM_PHY_HDR); 6307 tmp16 = b43_read16(dev, B43_MMIO_PSM_PHY_HDR);
6308 b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16 | 4); 6308 b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16 | 4);
@@ -6319,7 +6319,7 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev,
6319 b43_phy_set(dev, B43_PHY_B_TEST, 0x0800); 6319 b43_phy_set(dev, B43_PHY_B_TEST, 0x0800);
6320 } else { 6320 } else {
6321 b43_nphy_classifier(dev, 2, 2); 6321 b43_nphy_classifier(dev, 2, 2);
6322 if (new_channel->band == IEEE80211_BAND_2GHZ) 6322 if (new_channel->band == NL80211_BAND_2GHZ)
6323 b43_phy_mask(dev, B43_PHY_B_TEST, ~0x840); 6323 b43_phy_mask(dev, B43_PHY_B_TEST, ~0x840);
6324 } 6324 }
6325 6325
@@ -6449,7 +6449,7 @@ static int b43_nphy_set_channel(struct b43_wldev *dev,
6449 &(tabent_r7->phy_regs) : &(tabent_r7_2g->phy_regs); 6449 &(tabent_r7->phy_regs) : &(tabent_r7_2g->phy_regs);
6450 6450
6451 if (phy->radio_rev <= 4 || phy->radio_rev == 6) { 6451 if (phy->radio_rev <= 4 || phy->radio_rev == 6) {
6452 tmp = (channel->band == IEEE80211_BAND_5GHZ) ? 2 : 0; 6452 tmp = (channel->band == NL80211_BAND_5GHZ) ? 2 : 0;
6453 b43_radio_maskset(dev, R2057_TIA_CONFIG_CORE0, ~2, tmp); 6453 b43_radio_maskset(dev, R2057_TIA_CONFIG_CORE0, ~2, tmp);
6454 b43_radio_maskset(dev, R2057_TIA_CONFIG_CORE1, ~2, tmp); 6454 b43_radio_maskset(dev, R2057_TIA_CONFIG_CORE1, ~2, tmp);
6455 } 6455 }
@@ -6457,12 +6457,12 @@ static int b43_nphy_set_channel(struct b43_wldev *dev,
6457 b43_radio_2057_setup(dev, tabent_r7, tabent_r7_2g); 6457 b43_radio_2057_setup(dev, tabent_r7, tabent_r7_2g);
6458 b43_nphy_channel_setup(dev, phy_regs, channel); 6458 b43_nphy_channel_setup(dev, phy_regs, channel);
6459 } else if (phy->rev >= 3) { 6459 } else if (phy->rev >= 3) {
6460 tmp = (channel->band == IEEE80211_BAND_5GHZ) ? 4 : 0; 6460 tmp = (channel->band == NL80211_BAND_5GHZ) ? 4 : 0;
6461 b43_radio_maskset(dev, 0x08, 0xFFFB, tmp); 6461 b43_radio_maskset(dev, 0x08, 0xFFFB, tmp);
6462 b43_radio_2056_setup(dev, tabent_r3); 6462 b43_radio_2056_setup(dev, tabent_r3);
6463 b43_nphy_channel_setup(dev, &(tabent_r3->phy_regs), channel); 6463 b43_nphy_channel_setup(dev, &(tabent_r3->phy_regs), channel);
6464 } else { 6464 } else {
6465 tmp = (channel->band == IEEE80211_BAND_5GHZ) ? 0x0020 : 0x0050; 6465 tmp = (channel->band == NL80211_BAND_5GHZ) ? 0x0020 : 0x0050;
6466 b43_radio_maskset(dev, B2055_MASTER1, 0xFF8F, tmp); 6466 b43_radio_maskset(dev, B2055_MASTER1, 0xFF8F, tmp);
6467 b43_radio_2055_setup(dev, tabent_r2); 6467 b43_radio_2055_setup(dev, tabent_r2);
6468 b43_nphy_channel_setup(dev, &(tabent_r2->phy_regs), channel); 6468 b43_nphy_channel_setup(dev, &(tabent_r2->phy_regs), channel);
@@ -6692,7 +6692,7 @@ static int b43_nphy_op_switch_channel(struct b43_wldev *dev,
6692 enum nl80211_channel_type channel_type = 6692 enum nl80211_channel_type channel_type =
6693 cfg80211_get_chandef_type(&dev->wl->hw->conf.chandef); 6693 cfg80211_get_chandef_type(&dev->wl->hw->conf.chandef);
6694 6694
6695 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 6695 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
6696 if ((new_channel < 1) || (new_channel > 14)) 6696 if ((new_channel < 1) || (new_channel > 14))
6697 return -EINVAL; 6697 return -EINVAL;
6698 } else { 6698 } else {
@@ -6705,7 +6705,7 @@ static int b43_nphy_op_switch_channel(struct b43_wldev *dev,
6705 6705
6706static unsigned int b43_nphy_op_get_default_chan(struct b43_wldev *dev) 6706static unsigned int b43_nphy_op_get_default_chan(struct b43_wldev *dev)
6707{ 6707{
6708 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) 6708 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
6709 return 1; 6709 return 1;
6710 return 36; 6710 return 36;
6711} 6711}
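
The b43 N-PHY hunks above are a mechanical swap: every band comparison keeps its logic, the constants stay the same, and only the identifiers move from IEEE80211_BAND_* to NL80211_BAND_* (and the variable type from enum ieee80211_band to enum nl80211_band). A minimal sketch of what such a check looks like after the change follows; it is illustrative only, not code from this commit, and the 0x180/0x120 values are simply copied from the rev < 3 branch of b43_nphy_pa_override() shown above.

#include <linux/types.h>
#include <net/cfg80211.h>	/* enum nl80211_band */

/* Pick a PA-override value for the current band (sketch, not driver code). */
static u16 pa_override_value(enum nl80211_band band)
{
	if (band == NL80211_BAND_5GHZ)
		return 0x180;
	return 0x120;
}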
diff --git a/drivers/net/wireless/broadcom/b43/tables_lpphy.c b/drivers/net/wireless/broadcom/b43/tables_lpphy.c
index cff187c5616d..ce01e1645df7 100644
--- a/drivers/net/wireless/broadcom/b43/tables_lpphy.c
+++ b/drivers/net/wireless/broadcom/b43/tables_lpphy.c
@@ -560,7 +560,7 @@ void b2062_upload_init_table(struct b43_wldev *dev)
560 560
561 for (i = 0; i < ARRAY_SIZE(b2062_init_tab); i++) { 561 for (i = 0; i < ARRAY_SIZE(b2062_init_tab); i++) {
562 e = &b2062_init_tab[i]; 562 e = &b2062_init_tab[i];
563 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 563 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
564 if (!(e->flags & B206X_FLAG_G)) 564 if (!(e->flags & B206X_FLAG_G))
565 continue; 565 continue;
566 b43_radio_write(dev, e->offset, e->value_g); 566 b43_radio_write(dev, e->offset, e->value_g);
@@ -579,7 +579,7 @@ void b2063_upload_init_table(struct b43_wldev *dev)
579 579
580 for (i = 0; i < ARRAY_SIZE(b2063_init_tab); i++) { 580 for (i = 0; i < ARRAY_SIZE(b2063_init_tab); i++) {
581 e = &b2063_init_tab[i]; 581 e = &b2063_init_tab[i];
582 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 582 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
583 if (!(e->flags & B206X_FLAG_G)) 583 if (!(e->flags & B206X_FLAG_G))
584 continue; 584 continue;
585 b43_radio_write(dev, e->offset, e->value_g); 585 b43_radio_write(dev, e->offset, e->value_g);
@@ -2379,12 +2379,12 @@ static void lpphy_rev2plus_write_gain_table(struct b43_wldev *dev, int offset,
2379 tmp |= data.pga << 8; 2379 tmp |= data.pga << 8;
2380 tmp |= data.gm; 2380 tmp |= data.gm;
2381 if (dev->phy.rev >= 3) { 2381 if (dev->phy.rev >= 3) {
2382 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) 2382 if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
2383 tmp |= 0x10 << 24; 2383 tmp |= 0x10 << 24;
2384 else 2384 else
2385 tmp |= 0x70 << 24; 2385 tmp |= 0x70 << 24;
2386 } else { 2386 } else {
2387 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) 2387 if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
2388 tmp |= 0x14 << 24; 2388 tmp |= 0x14 << 24;
2389 else 2389 else
2390 tmp |= 0x7F << 24; 2390 tmp |= 0x7F << 24;
@@ -2423,7 +2423,7 @@ void lpphy_init_tx_gain_table(struct b43_wldev *dev)
2423 (sprom->boardflags_lo & B43_BFL_HGPA)) 2423 (sprom->boardflags_lo & B43_BFL_HGPA))
2424 lpphy_write_gain_table_bulk(dev, 0, 128, 2424 lpphy_write_gain_table_bulk(dev, 0, 128,
2425 lpphy_rev0_nopa_tx_gain_table); 2425 lpphy_rev0_nopa_tx_gain_table);
2426 else if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) 2426 else if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
2427 lpphy_write_gain_table_bulk(dev, 0, 128, 2427 lpphy_write_gain_table_bulk(dev, 0, 128,
2428 lpphy_rev0_2ghz_tx_gain_table); 2428 lpphy_rev0_2ghz_tx_gain_table);
2429 else 2429 else
@@ -2435,7 +2435,7 @@ void lpphy_init_tx_gain_table(struct b43_wldev *dev)
2435 (sprom->boardflags_lo & B43_BFL_HGPA)) 2435 (sprom->boardflags_lo & B43_BFL_HGPA))
2436 lpphy_write_gain_table_bulk(dev, 0, 128, 2436 lpphy_write_gain_table_bulk(dev, 0, 128,
2437 lpphy_rev1_nopa_tx_gain_table); 2437 lpphy_rev1_nopa_tx_gain_table);
2438 else if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) 2438 else if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
2439 lpphy_write_gain_table_bulk(dev, 0, 128, 2439 lpphy_write_gain_table_bulk(dev, 0, 128,
2440 lpphy_rev1_2ghz_tx_gain_table); 2440 lpphy_rev1_2ghz_tx_gain_table);
2441 else 2441 else
@@ -2446,7 +2446,7 @@ void lpphy_init_tx_gain_table(struct b43_wldev *dev)
2446 if (sprom->boardflags_hi & B43_BFH_NOPA) 2446 if (sprom->boardflags_hi & B43_BFH_NOPA)
2447 lpphy_write_gain_table_bulk(dev, 0, 128, 2447 lpphy_write_gain_table_bulk(dev, 0, 128,
2448 lpphy_rev2_nopa_tx_gain_table); 2448 lpphy_rev2_nopa_tx_gain_table);
2449 else if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) 2449 else if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
2450 lpphy_write_gain_table_bulk(dev, 0, 128, 2450 lpphy_write_gain_table_bulk(dev, 0, 128,
2451 lpphy_rev2_2ghz_tx_gain_table); 2451 lpphy_rev2_2ghz_tx_gain_table);
2452 else 2452 else
diff --git a/drivers/net/wireless/broadcom/b43/tables_nphy.c b/drivers/net/wireless/broadcom/b43/tables_nphy.c
index b2f0d245bcf3..44e0957a70cc 100644
--- a/drivers/net/wireless/broadcom/b43/tables_nphy.c
+++ b/drivers/net/wireless/broadcom/b43/tables_nphy.c
@@ -3502,7 +3502,7 @@ static void b43_nphy_tables_init_rev7_volatile(struct b43_wldev *dev)
3502 { 0x2, 0x18, 0x2 }, /* Core 1 */ 3502 { 0x2, 0x18, 0x2 }, /* Core 1 */
3503 }; 3503 };
3504 3504
3505 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) 3505 if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
3506 antswlut = sprom->fem.ghz5.antswlut; 3506 antswlut = sprom->fem.ghz5.antswlut;
3507 else 3507 else
3508 antswlut = sprom->fem.ghz2.antswlut; 3508 antswlut = sprom->fem.ghz2.antswlut;
@@ -3566,7 +3566,7 @@ static void b43_nphy_tables_init_rev3(struct b43_wldev *dev)
3566 struct ssb_sprom *sprom = dev->dev->bus_sprom; 3566 struct ssb_sprom *sprom = dev->dev->bus_sprom;
3567 u8 antswlut; 3567 u8 antswlut;
3568 3568
3569 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) 3569 if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
3570 antswlut = sprom->fem.ghz5.antswlut; 3570 antswlut = sprom->fem.ghz5.antswlut;
3571 else 3571 else
3572 antswlut = sprom->fem.ghz2.antswlut; 3572 antswlut = sprom->fem.ghz2.antswlut;
@@ -3651,7 +3651,7 @@ static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev)
3651{ 3651{
3652 struct b43_phy *phy = &dev->phy; 3652 struct b43_phy *phy = &dev->phy;
3653 3653
3654 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 3654 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
3655 switch (phy->rev) { 3655 switch (phy->rev) {
3656 case 17: 3656 case 17:
3657 if (phy->radio_rev == 14) 3657 if (phy->radio_rev == 14)
@@ -3698,17 +3698,17 @@ static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev)
3698const u32 *b43_nphy_get_tx_gain_table(struct b43_wldev *dev) 3698const u32 *b43_nphy_get_tx_gain_table(struct b43_wldev *dev)
3699{ 3699{
3700 struct b43_phy *phy = &dev->phy; 3700 struct b43_phy *phy = &dev->phy;
3701 enum ieee80211_band band = b43_current_band(dev->wl); 3701 enum nl80211_band band = b43_current_band(dev->wl);
3702 struct ssb_sprom *sprom = dev->dev->bus_sprom; 3702 struct ssb_sprom *sprom = dev->dev->bus_sprom;
3703 3703
3704 if (dev->phy.rev < 3) 3704 if (dev->phy.rev < 3)
3705 return b43_ntab_tx_gain_rev0_1_2; 3705 return b43_ntab_tx_gain_rev0_1_2;
3706 3706
3707 /* rev 3+ */ 3707 /* rev 3+ */
3708 if ((dev->phy.n->ipa2g_on && band == IEEE80211_BAND_2GHZ) || 3708 if ((dev->phy.n->ipa2g_on && band == NL80211_BAND_2GHZ) ||
3709 (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ)) { 3709 (dev->phy.n->ipa5g_on && band == NL80211_BAND_5GHZ)) {
3710 return b43_nphy_get_ipa_gain_table(dev); 3710 return b43_nphy_get_ipa_gain_table(dev);
3711 } else if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { 3711 } else if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
3712 switch (phy->rev) { 3712 switch (phy->rev) {
3713 case 6: 3713 case 6:
3714 case 5: 3714 case 5:
@@ -3746,7 +3746,7 @@ const s16 *b43_ntab_get_rf_pwr_offset_table(struct b43_wldev *dev)
3746{ 3746{
3747 struct b43_phy *phy = &dev->phy; 3747 struct b43_phy *phy = &dev->phy;
3748 3748
3749 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 3749 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
3750 switch (phy->rev) { 3750 switch (phy->rev) {
3751 case 17: 3751 case 17:
3752 if (phy->radio_rev == 14) 3752 if (phy->radio_rev == 14)
diff --git a/drivers/net/wireless/broadcom/b43/tables_phy_lcn.c b/drivers/net/wireless/broadcom/b43/tables_phy_lcn.c
index e347b8d80ea4..704ef1bcb5b1 100644
--- a/drivers/net/wireless/broadcom/b43/tables_phy_lcn.c
+++ b/drivers/net/wireless/broadcom/b43/tables_phy_lcn.c
@@ -701,7 +701,7 @@ void b43_phy_lcn_tables_init(struct b43_wldev *dev)
701 701
702 b43_phy_lcn_upload_static_tables(dev); 702 b43_phy_lcn_upload_static_tables(dev);
703 703
704 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 704 if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
705 if (sprom->boardflags_lo & B43_BFL_FEM) 705 if (sprom->boardflags_lo & B43_BFL_FEM)
706 b43_phy_lcn_load_tx_gain_tab(dev, 706 b43_phy_lcn_load_tx_gain_tab(dev,
707 b43_lcntab_tx_gain_tbl_2ghz_ext_pa_rev0); 707 b43_lcntab_tx_gain_tbl_2ghz_ext_pa_rev0);
diff --git a/drivers/net/wireless/broadcom/b43/xmit.c b/drivers/net/wireless/broadcom/b43/xmit.c
index 426dc13c44cd..f6201264de49 100644
--- a/drivers/net/wireless/broadcom/b43/xmit.c
+++ b/drivers/net/wireless/broadcom/b43/xmit.c
@@ -803,7 +803,7 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
803 chanid = (chanstat & B43_RX_CHAN_ID) >> B43_RX_CHAN_ID_SHIFT; 803 chanid = (chanstat & B43_RX_CHAN_ID) >> B43_RX_CHAN_ID_SHIFT;
804 switch (chanstat & B43_RX_CHAN_PHYTYPE) { 804 switch (chanstat & B43_RX_CHAN_PHYTYPE) {
805 case B43_PHYTYPE_A: 805 case B43_PHYTYPE_A:
806 status.band = IEEE80211_BAND_5GHZ; 806 status.band = NL80211_BAND_5GHZ;
807 B43_WARN_ON(1); 807 B43_WARN_ON(1);
808 /* FIXME: We don't really know which value the "chanid" contains. 808 /* FIXME: We don't really know which value the "chanid" contains.
809 * So the following assignment might be wrong. */ 809 * So the following assignment might be wrong. */
@@ -811,7 +811,7 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
811 ieee80211_channel_to_frequency(chanid, status.band); 811 ieee80211_channel_to_frequency(chanid, status.band);
812 break; 812 break;
813 case B43_PHYTYPE_G: 813 case B43_PHYTYPE_G:
814 status.band = IEEE80211_BAND_2GHZ; 814 status.band = NL80211_BAND_2GHZ;
815 /* Somewhere between 478.104 and 508.1084 firmware for G-PHY 815 /* Somewhere between 478.104 and 508.1084 firmware for G-PHY
816 * has been modified to be compatible with N-PHY and others. 816 * has been modified to be compatible with N-PHY and others.
817 */ 817 */
@@ -826,9 +826,9 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
826 /* chanid is the SHM channel cookie. Which is the plain 826 /* chanid is the SHM channel cookie. Which is the plain
827 * channel number in b43. */ 827 * channel number in b43. */
828 if (chanstat & B43_RX_CHAN_5GHZ) 828 if (chanstat & B43_RX_CHAN_5GHZ)
829 status.band = IEEE80211_BAND_5GHZ; 829 status.band = NL80211_BAND_5GHZ;
830 else 830 else
831 status.band = IEEE80211_BAND_2GHZ; 831 status.band = NL80211_BAND_2GHZ;
832 status.freq = 832 status.freq =
833 ieee80211_channel_to_frequency(chanid, status.band); 833 ieee80211_channel_to_frequency(chanid, status.band);
834 break; 834 break;
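
The RX-status hunks above pass status.band straight into ieee80211_channel_to_frequency(), which with this series takes an enum nl80211_band. A short sketch of that pattern, with assumed context: struct ieee80211_rx_status comes from mac80211, and fill_rx_freq(), chanid and is_5ghz are placeholder names, not driver symbols.

#include <net/mac80211.h>	/* struct ieee80211_rx_status */

/* Fill band and frequency for a received frame (illustrative sketch). */
static void fill_rx_freq(struct ieee80211_rx_status *status, int chanid,
			 bool is_5ghz)
{
	status->band = is_5ghz ? NL80211_BAND_5GHZ : NL80211_BAND_2GHZ;
	/* e.g. channel 6 on 2.4 GHz -> 2437 MHz, channel 36 on 5 GHz -> 5180 MHz */
	status->freq = ieee80211_channel_to_frequency(chanid, status->band);
}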
diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c
index afc1fb3e38df..83770d2ea057 100644
--- a/drivers/net/wireless/broadcom/b43legacy/main.c
+++ b/drivers/net/wireless/broadcom/b43legacy/main.c
@@ -1056,7 +1056,7 @@ static void b43legacy_write_probe_resp_plcp(struct b43legacy_wldev *dev,
1056 b43legacy_generate_plcp_hdr(&plcp, size + FCS_LEN, rate->hw_value); 1056 b43legacy_generate_plcp_hdr(&plcp, size + FCS_LEN, rate->hw_value);
1057 dur = ieee80211_generic_frame_duration(dev->wl->hw, 1057 dur = ieee80211_generic_frame_duration(dev->wl->hw,
1058 dev->wl->vif, 1058 dev->wl->vif,
1059 IEEE80211_BAND_2GHZ, 1059 NL80211_BAND_2GHZ,
1060 size, 1060 size,
1061 rate); 1061 rate);
1062 /* Write PLCP in two parts and timing for packet transfer */ 1062 /* Write PLCP in two parts and timing for packet transfer */
@@ -1122,7 +1122,7 @@ static const u8 *b43legacy_generate_probe_resp(struct b43legacy_wldev *dev,
1122 IEEE80211_STYPE_PROBE_RESP); 1122 IEEE80211_STYPE_PROBE_RESP);
1123 dur = ieee80211_generic_frame_duration(dev->wl->hw, 1123 dur = ieee80211_generic_frame_duration(dev->wl->hw,
1124 dev->wl->vif, 1124 dev->wl->vif,
1125 IEEE80211_BAND_2GHZ, 1125 NL80211_BAND_2GHZ,
1126 *dest_size, 1126 *dest_size,
1127 rate); 1127 rate);
1128 hdr->duration_id = dur; 1128 hdr->duration_id = dur;
@@ -2719,7 +2719,7 @@ static int b43legacy_op_dev_config(struct ieee80211_hw *hw,
2719 2719
2720 /* Switch the PHY mode (if necessary). */ 2720 /* Switch the PHY mode (if necessary). */
2721 switch (conf->chandef.chan->band) { 2721 switch (conf->chandef.chan->band) {
2722 case IEEE80211_BAND_2GHZ: 2722 case NL80211_BAND_2GHZ:
2723 if (phy->type == B43legacy_PHYTYPE_B) 2723 if (phy->type == B43legacy_PHYTYPE_B)
2724 new_phymode = B43legacy_PHYMODE_B; 2724 new_phymode = B43legacy_PHYMODE_B;
2725 else 2725 else
@@ -2792,7 +2792,7 @@ out_unlock_mutex:
2792static void b43legacy_update_basic_rates(struct b43legacy_wldev *dev, u32 brates) 2792static void b43legacy_update_basic_rates(struct b43legacy_wldev *dev, u32 brates)
2793{ 2793{
2794 struct ieee80211_supported_band *sband = 2794 struct ieee80211_supported_band *sband =
2795 dev->wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ]; 2795 dev->wl->hw->wiphy->bands[NL80211_BAND_2GHZ];
2796 struct ieee80211_rate *rate; 2796 struct ieee80211_rate *rate;
2797 int i; 2797 int i;
2798 u16 basic, direct, offset, basic_offset, rateptr; 2798 u16 basic, direct, offset, basic_offset, rateptr;
@@ -3630,13 +3630,13 @@ static int b43legacy_setup_modes(struct b43legacy_wldev *dev,
3630 3630
3631 phy->possible_phymodes = 0; 3631 phy->possible_phymodes = 0;
3632 if (have_bphy) { 3632 if (have_bphy) {
3633 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = 3633 hw->wiphy->bands[NL80211_BAND_2GHZ] =
3634 &b43legacy_band_2GHz_BPHY; 3634 &b43legacy_band_2GHz_BPHY;
3635 phy->possible_phymodes |= B43legacy_PHYMODE_B; 3635 phy->possible_phymodes |= B43legacy_PHYMODE_B;
3636 } 3636 }
3637 3637
3638 if (have_gphy) { 3638 if (have_gphy) {
3639 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = 3639 hw->wiphy->bands[NL80211_BAND_2GHZ] =
3640 &b43legacy_band_2GHz_GPHY; 3640 &b43legacy_band_2GHz_GPHY;
3641 phy->possible_phymodes |= B43legacy_PHYMODE_G; 3641 phy->possible_phymodes |= B43legacy_PHYMODE_G;
3642 } 3642 }
diff --git a/drivers/net/wireless/broadcom/b43legacy/xmit.c b/drivers/net/wireless/broadcom/b43legacy/xmit.c
index 34bf3f0b729f..35ccf400b02c 100644
--- a/drivers/net/wireless/broadcom/b43legacy/xmit.c
+++ b/drivers/net/wireless/broadcom/b43legacy/xmit.c
@@ -565,7 +565,7 @@ void b43legacy_rx(struct b43legacy_wldev *dev,
565 switch (chanstat & B43legacy_RX_CHAN_PHYTYPE) { 565 switch (chanstat & B43legacy_RX_CHAN_PHYTYPE) {
566 case B43legacy_PHYTYPE_B: 566 case B43legacy_PHYTYPE_B:
567 case B43legacy_PHYTYPE_G: 567 case B43legacy_PHYTYPE_G:
568 status.band = IEEE80211_BAND_2GHZ; 568 status.band = NL80211_BAND_2GHZ;
569 status.freq = chanid + 2400; 569 status.freq = chanid + 2400;
570 break; 570 break;
571 default: 571 default:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index da0cdd313880..2fc0597f2cd0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -250,7 +250,7 @@ static int brcmf_sdiod_request_data(struct brcmf_sdio_dev *sdiodev, u8 fn,
250 u32 addr, u8 regsz, void *data, bool write) 250 u32 addr, u8 regsz, void *data, bool write)
251{ 251{
252 struct sdio_func *func; 252 struct sdio_func *func;
253 int ret; 253 int ret = -EINVAL;
254 254
255 brcmf_dbg(SDIO, "rw=%d, func=%d, addr=0x%05x, nbytes=%d\n", 255 brcmf_dbg(SDIO, "rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
256 write, fn, addr, regsz); 256 write, fn, addr, regsz);
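
The bcmsdh.c hunk is unrelated to the band rename: it pre-initializes ret to -EINVAL so that a register size the function does not handle returns a defined error instead of an uninitialized value. A sketch of that shape, with the actual SDIO transfer elided and the handled sizes assumed rather than taken from the driver:

#include <linux/errno.h>
#include <linux/types.h>

/* Sketch only: default the return code, let handled sizes overwrite it. */
static int request_data_sketch(u8 regsz)
{
	int ret = -EINVAL;

	switch (regsz) {
	case 1:
	case 2:
	case 4:
		ret = 0;	/* the real code would issue the SDIO transfer here */
		break;
	}

	return ret;
}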
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index d5c2a27573b4..9a567e263bb1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -144,7 +144,7 @@ static struct ieee80211_rate __wl_rates[] = {
144#define wl_a_rates_size (wl_g_rates_size - 4) 144#define wl_a_rates_size (wl_g_rates_size - 4)
145 145
146#define CHAN2G(_channel, _freq) { \ 146#define CHAN2G(_channel, _freq) { \
147 .band = IEEE80211_BAND_2GHZ, \ 147 .band = NL80211_BAND_2GHZ, \
148 .center_freq = (_freq), \ 148 .center_freq = (_freq), \
149 .hw_value = (_channel), \ 149 .hw_value = (_channel), \
150 .flags = IEEE80211_CHAN_DISABLED, \ 150 .flags = IEEE80211_CHAN_DISABLED, \
@@ -153,7 +153,7 @@ static struct ieee80211_rate __wl_rates[] = {
153} 153}
154 154
155#define CHAN5G(_channel) { \ 155#define CHAN5G(_channel) { \
156 .band = IEEE80211_BAND_5GHZ, \ 156 .band = NL80211_BAND_5GHZ, \
157 .center_freq = 5000 + (5 * (_channel)), \ 157 .center_freq = 5000 + (5 * (_channel)), \
158 .hw_value = (_channel), \ 158 .hw_value = (_channel), \
159 .flags = IEEE80211_CHAN_DISABLED, \ 159 .flags = IEEE80211_CHAN_DISABLED, \
@@ -181,13 +181,13 @@ static struct ieee80211_channel __wl_5ghz_channels[] = {
181 * above is added to the band during setup. 181 * above is added to the band during setup.
182 */ 182 */
183static const struct ieee80211_supported_band __wl_band_2ghz = { 183static const struct ieee80211_supported_band __wl_band_2ghz = {
184 .band = IEEE80211_BAND_2GHZ, 184 .band = NL80211_BAND_2GHZ,
185 .bitrates = wl_g_rates, 185 .bitrates = wl_g_rates,
186 .n_bitrates = wl_g_rates_size, 186 .n_bitrates = wl_g_rates_size,
187}; 187};
188 188
189static const struct ieee80211_supported_band __wl_band_5ghz = { 189static const struct ieee80211_supported_band __wl_band_5ghz = {
190 .band = IEEE80211_BAND_5GHZ, 190 .band = NL80211_BAND_5GHZ,
191 .bitrates = wl_a_rates, 191 .bitrates = wl_a_rates,
192 .n_bitrates = wl_a_rates_size, 192 .n_bitrates = wl_a_rates_size,
193}; 193};
@@ -292,13 +292,13 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf,
292 WARN_ON_ONCE(1); 292 WARN_ON_ONCE(1);
293 } 293 }
294 switch (ch->chan->band) { 294 switch (ch->chan->band) {
295 case IEEE80211_BAND_2GHZ: 295 case NL80211_BAND_2GHZ:
296 ch_inf.band = BRCMU_CHAN_BAND_2G; 296 ch_inf.band = BRCMU_CHAN_BAND_2G;
297 break; 297 break;
298 case IEEE80211_BAND_5GHZ: 298 case NL80211_BAND_5GHZ:
299 ch_inf.band = BRCMU_CHAN_BAND_5G; 299 ch_inf.band = BRCMU_CHAN_BAND_5G;
300 break; 300 break;
301 case IEEE80211_BAND_60GHZ: 301 case NL80211_BAND_60GHZ:
302 default: 302 default:
303 WARN_ON_ONCE(1); 303 WARN_ON_ONCE(1);
304 } 304 }
@@ -2679,9 +2679,9 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg,
2679 channel = bi->ctl_ch; 2679 channel = bi->ctl_ch;
2680 2680
2681 if (channel <= CH_MAX_2G_CHANNEL) 2681 if (channel <= CH_MAX_2G_CHANNEL)
2682 band = wiphy->bands[IEEE80211_BAND_2GHZ]; 2682 band = wiphy->bands[NL80211_BAND_2GHZ];
2683 else 2683 else
2684 band = wiphy->bands[IEEE80211_BAND_5GHZ]; 2684 band = wiphy->bands[NL80211_BAND_5GHZ];
2685 2685
2686 freq = ieee80211_channel_to_frequency(channel, band->band); 2686 freq = ieee80211_channel_to_frequency(channel, band->band);
2687 notify_channel = ieee80211_get_channel(wiphy, freq); 2687 notify_channel = ieee80211_get_channel(wiphy, freq);
@@ -2788,9 +2788,9 @@ static s32 brcmf_inform_ibss(struct brcmf_cfg80211_info *cfg,
2788 cfg->d11inf.decchspec(&ch); 2788 cfg->d11inf.decchspec(&ch);
2789 2789
2790 if (ch.band == BRCMU_CHAN_BAND_2G) 2790 if (ch.band == BRCMU_CHAN_BAND_2G)
2791 band = wiphy->bands[IEEE80211_BAND_2GHZ]; 2791 band = wiphy->bands[NL80211_BAND_2GHZ];
2792 else 2792 else
2793 band = wiphy->bands[IEEE80211_BAND_5GHZ]; 2793 band = wiphy->bands[NL80211_BAND_5GHZ];
2794 2794
2795 freq = ieee80211_channel_to_frequency(ch.chnum, band->band); 2795 freq = ieee80211_channel_to_frequency(ch.chnum, band->band);
2796 cfg->channel = freq; 2796 cfg->channel = freq;
@@ -5215,9 +5215,9 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
5215 cfg->d11inf.decchspec(&ch); 5215 cfg->d11inf.decchspec(&ch);
5216 5216
5217 if (ch.band == BRCMU_CHAN_BAND_2G) 5217 if (ch.band == BRCMU_CHAN_BAND_2G)
5218 band = wiphy->bands[IEEE80211_BAND_2GHZ]; 5218 band = wiphy->bands[NL80211_BAND_2GHZ];
5219 else 5219 else
5220 band = wiphy->bands[IEEE80211_BAND_5GHZ]; 5220 band = wiphy->bands[NL80211_BAND_5GHZ];
5221 5221
5222 freq = ieee80211_channel_to_frequency(ch.chnum, band->band); 5222 freq = ieee80211_channel_to_frequency(ch.chnum, band->band);
5223 notify_channel = ieee80211_get_channel(wiphy, freq); 5223 notify_channel = ieee80211_get_channel(wiphy, freq);
@@ -5707,11 +5707,11 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
5707 } 5707 }
5708 5708
5709 wiphy = cfg_to_wiphy(cfg); 5709 wiphy = cfg_to_wiphy(cfg);
5710 band = wiphy->bands[IEEE80211_BAND_2GHZ]; 5710 band = wiphy->bands[NL80211_BAND_2GHZ];
5711 if (band) 5711 if (band)
5712 for (i = 0; i < band->n_channels; i++) 5712 for (i = 0; i < band->n_channels; i++)
5713 band->channels[i].flags = IEEE80211_CHAN_DISABLED; 5713 band->channels[i].flags = IEEE80211_CHAN_DISABLED;
5714 band = wiphy->bands[IEEE80211_BAND_5GHZ]; 5714 band = wiphy->bands[NL80211_BAND_5GHZ];
5715 if (band) 5715 if (band)
5716 for (i = 0; i < band->n_channels; i++) 5716 for (i = 0; i < band->n_channels; i++)
5717 band->channels[i].flags = IEEE80211_CHAN_DISABLED; 5717 band->channels[i].flags = IEEE80211_CHAN_DISABLED;
@@ -5722,9 +5722,9 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
5722 cfg->d11inf.decchspec(&ch); 5722 cfg->d11inf.decchspec(&ch);
5723 5723
5724 if (ch.band == BRCMU_CHAN_BAND_2G) { 5724 if (ch.band == BRCMU_CHAN_BAND_2G) {
5725 band = wiphy->bands[IEEE80211_BAND_2GHZ]; 5725 band = wiphy->bands[NL80211_BAND_2GHZ];
5726 } else if (ch.band == BRCMU_CHAN_BAND_5G) { 5726 } else if (ch.band == BRCMU_CHAN_BAND_5G) {
5727 band = wiphy->bands[IEEE80211_BAND_5GHZ]; 5727 band = wiphy->bands[NL80211_BAND_5GHZ];
5728 } else { 5728 } else {
5729 brcmf_err("Invalid channel Spec. 0x%x.\n", ch.chspec); 5729 brcmf_err("Invalid channel Spec. 0x%x.\n", ch.chspec);
5730 continue; 5730 continue;
@@ -5839,7 +5839,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg)
5839 return err; 5839 return err;
5840 } 5840 }
5841 5841
5842 band = cfg_to_wiphy(cfg)->bands[IEEE80211_BAND_2GHZ]; 5842 band = cfg_to_wiphy(cfg)->bands[NL80211_BAND_2GHZ];
5843 list = (struct brcmf_chanspec_list *)pbuf; 5843 list = (struct brcmf_chanspec_list *)pbuf;
5844 num_chan = le32_to_cpu(list->count); 5844 num_chan = le32_to_cpu(list->count);
5845 for (i = 0; i < num_chan; i++) { 5845 for (i = 0; i < num_chan; i++) {
@@ -5871,11 +5871,11 @@ static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[])
5871 band = WLC_BAND_2G; 5871 band = WLC_BAND_2G;
5872 err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &band); 5872 err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &band);
5873 if (!err) { 5873 if (!err) {
5874 bw_cap[IEEE80211_BAND_2GHZ] = band; 5874 bw_cap[NL80211_BAND_2GHZ] = band;
5875 band = WLC_BAND_5G; 5875 band = WLC_BAND_5G;
5876 err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &band); 5876 err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &band);
5877 if (!err) { 5877 if (!err) {
5878 bw_cap[IEEE80211_BAND_5GHZ] = band; 5878 bw_cap[NL80211_BAND_5GHZ] = band;
5879 return; 5879 return;
5880 } 5880 }
5881 WARN_ON(1); 5881 WARN_ON(1);
@@ -5890,14 +5890,14 @@ static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[])
5890 5890
5891 switch (mimo_bwcap) { 5891 switch (mimo_bwcap) {
5892 case WLC_N_BW_40ALL: 5892 case WLC_N_BW_40ALL:
5893 bw_cap[IEEE80211_BAND_2GHZ] |= WLC_BW_40MHZ_BIT; 5893 bw_cap[NL80211_BAND_2GHZ] |= WLC_BW_40MHZ_BIT;
5894 /* fall-thru */ 5894 /* fall-thru */
5895 case WLC_N_BW_20IN2G_40IN5G: 5895 case WLC_N_BW_20IN2G_40IN5G:
5896 bw_cap[IEEE80211_BAND_5GHZ] |= WLC_BW_40MHZ_BIT; 5896 bw_cap[NL80211_BAND_5GHZ] |= WLC_BW_40MHZ_BIT;
5897 /* fall-thru */ 5897 /* fall-thru */
5898 case WLC_N_BW_20ALL: 5898 case WLC_N_BW_20ALL:
5899 bw_cap[IEEE80211_BAND_2GHZ] |= WLC_BW_20MHZ_BIT; 5899 bw_cap[NL80211_BAND_2GHZ] |= WLC_BW_20MHZ_BIT;
5900 bw_cap[IEEE80211_BAND_5GHZ] |= WLC_BW_20MHZ_BIT; 5900 bw_cap[NL80211_BAND_5GHZ] |= WLC_BW_20MHZ_BIT;
5901 break; 5901 break;
5902 default: 5902 default:
5903 brcmf_err("invalid mimo_bw_cap value\n"); 5903 brcmf_err("invalid mimo_bw_cap value\n");
@@ -5938,7 +5938,7 @@ static void brcmf_update_vht_cap(struct ieee80211_supported_band *band,
5938 __le16 mcs_map; 5938 __le16 mcs_map;
5939 5939
5940 /* not allowed in 2.4G band */ 5940 /* not allowed in 2.4G band */
5941 if (band->band == IEEE80211_BAND_2GHZ) 5941 if (band->band == NL80211_BAND_2GHZ)
5942 return; 5942 return;
5943 5943
5944 band->vht_cap.vht_supported = true; 5944 band->vht_cap.vht_supported = true;
@@ -5997,8 +5997,8 @@ static int brcmf_setup_wiphybands(struct wiphy *wiphy)
5997 brcmf_get_bwcap(ifp, bw_cap); 5997 brcmf_get_bwcap(ifp, bw_cap);
5998 } 5998 }
5999 brcmf_dbg(INFO, "nmode=%d, vhtmode=%d, bw_cap=(%d, %d)\n", 5999 brcmf_dbg(INFO, "nmode=%d, vhtmode=%d, bw_cap=(%d, %d)\n",
6000 nmode, vhtmode, bw_cap[IEEE80211_BAND_2GHZ], 6000 nmode, vhtmode, bw_cap[NL80211_BAND_2GHZ],
6001 bw_cap[IEEE80211_BAND_5GHZ]); 6001 bw_cap[NL80211_BAND_5GHZ]);
6002 6002
6003 err = brcmf_fil_iovar_int_get(ifp, "rxchain", &rxchain); 6003 err = brcmf_fil_iovar_int_get(ifp, "rxchain", &rxchain);
6004 if (err) { 6004 if (err) {
@@ -6321,7 +6321,7 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
6321 } 6321 }
6322 6322
6323 band->n_channels = ARRAY_SIZE(__wl_2ghz_channels); 6323 band->n_channels = ARRAY_SIZE(__wl_2ghz_channels);
6324 wiphy->bands[IEEE80211_BAND_2GHZ] = band; 6324 wiphy->bands[NL80211_BAND_2GHZ] = band;
6325 } 6325 }
6326 if (bandlist[i] == cpu_to_le32(WLC_BAND_5G)) { 6326 if (bandlist[i] == cpu_to_le32(WLC_BAND_5G)) {
6327 band = kmemdup(&__wl_band_5ghz, sizeof(__wl_band_5ghz), 6327 band = kmemdup(&__wl_band_5ghz, sizeof(__wl_band_5ghz),
@@ -6338,7 +6338,7 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
6338 } 6338 }
6339 6339
6340 band->n_channels = ARRAY_SIZE(__wl_5ghz_channels); 6340 band->n_channels = ARRAY_SIZE(__wl_5ghz_channels);
6341 wiphy->bands[IEEE80211_BAND_5GHZ] = band; 6341 wiphy->bands[NL80211_BAND_5GHZ] = band;
6342 } 6342 }
6343 } 6343 }
6344 err = brcmf_setup_wiphybands(wiphy); 6344 err = brcmf_setup_wiphybands(wiphy);
@@ -6604,13 +6604,13 @@ static void brcmf_free_wiphy(struct wiphy *wiphy)
6604 kfree(wiphy->iface_combinations[i].limits); 6604 kfree(wiphy->iface_combinations[i].limits);
6605 } 6605 }
6606 kfree(wiphy->iface_combinations); 6606 kfree(wiphy->iface_combinations);
6607 if (wiphy->bands[IEEE80211_BAND_2GHZ]) { 6607 if (wiphy->bands[NL80211_BAND_2GHZ]) {
6608 kfree(wiphy->bands[IEEE80211_BAND_2GHZ]->channels); 6608 kfree(wiphy->bands[NL80211_BAND_2GHZ]->channels);
6609 kfree(wiphy->bands[IEEE80211_BAND_2GHZ]); 6609 kfree(wiphy->bands[NL80211_BAND_2GHZ]);
6610 } 6610 }
6611 if (wiphy->bands[IEEE80211_BAND_5GHZ]) { 6611 if (wiphy->bands[NL80211_BAND_5GHZ]) {
6612 kfree(wiphy->bands[IEEE80211_BAND_5GHZ]->channels); 6612 kfree(wiphy->bands[NL80211_BAND_5GHZ]->channels);
6613 kfree(wiphy->bands[IEEE80211_BAND_5GHZ]); 6613 kfree(wiphy->bands[NL80211_BAND_5GHZ]);
6614 } 6614 }
6615 wiphy_free(wiphy); 6615 wiphy_free(wiphy);
6616} 6616}
@@ -6698,8 +6698,8 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
6698 * cfg80211 here that we do and have it decide we can enable 6698 * cfg80211 here that we do and have it decide we can enable
6699 * it. But first check if device does support 2G operation. 6699 * it. But first check if device does support 2G operation.
6700 */ 6700 */
6701 if (wiphy->bands[IEEE80211_BAND_2GHZ]) { 6701 if (wiphy->bands[NL80211_BAND_2GHZ]) {
6702 cap = &wiphy->bands[IEEE80211_BAND_2GHZ]->ht_cap.cap; 6702 cap = &wiphy->bands[NL80211_BAND_2GHZ]->ht_cap.cap;
6703 *cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; 6703 *cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
6704 } 6704 }
6705 err = wiphy_register(wiphy); 6705 err = wiphy_register(wiphy);
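
Throughout brcmfmac the wiphy->bands[] array is now indexed with NL80211_BAND_2GHZ / NL80211_BAND_5GHZ, and loops over all bands (as in the brcmsmac regulatory code further down) bound themselves with NUM_NL80211_BANDS instead of IEEE80211_NUM_BANDS. A small sketch of that iteration pattern, assuming only cfg80211 types; disable_all_channels() is a made-up name, not a function in these drivers.

#include <net/cfg80211.h>

/* Walk every registered band and mark its channels disabled (sketch only). */
static void disable_all_channels(struct wiphy *wiphy)
{
	struct ieee80211_supported_band *sband;
	enum nl80211_band band;
	int i;

	for (band = 0; band < NUM_NL80211_BANDS; band++) {
		sband = wiphy->bands[band];
		if (!sband)
			continue;
		for (i = 0; i < sband->n_channels; i++)
			sband->channels[i].flags = IEEE80211_CHAN_DISABLED;
	}
}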
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index b5a49e564f25..c2ac91df35ed 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -1430,8 +1430,8 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
1430 1430
1431 freq = ieee80211_channel_to_frequency(ch.chnum, 1431 freq = ieee80211_channel_to_frequency(ch.chnum,
1432 ch.band == BRCMU_CHAN_BAND_2G ? 1432 ch.band == BRCMU_CHAN_BAND_2G ?
1433 IEEE80211_BAND_2GHZ : 1433 NL80211_BAND_2GHZ :
1434 IEEE80211_BAND_5GHZ); 1434 NL80211_BAND_5GHZ);
1435 1435
1436 wdev = &ifp->vif->wdev; 1436 wdev = &ifp->vif->wdev;
1437 cfg80211_rx_mgmt(wdev, freq, 0, (u8 *)mgmt_frame, mgmt_frame_len, 0); 1437 cfg80211_rx_mgmt(wdev, freq, 0, (u8 *)mgmt_frame, mgmt_frame_len, 0);
@@ -1900,8 +1900,8 @@ s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp,
1900 mgmt_frame_len = e->datalen - sizeof(*rxframe); 1900 mgmt_frame_len = e->datalen - sizeof(*rxframe);
1901 freq = ieee80211_channel_to_frequency(ch.chnum, 1901 freq = ieee80211_channel_to_frequency(ch.chnum,
1902 ch.band == BRCMU_CHAN_BAND_2G ? 1902 ch.band == BRCMU_CHAN_BAND_2G ?
1903 IEEE80211_BAND_2GHZ : 1903 NL80211_BAND_2GHZ :
1904 IEEE80211_BAND_5GHZ); 1904 NL80211_BAND_5GHZ);
1905 1905
1906 cfg80211_rx_mgmt(&vif->wdev, freq, 0, mgmt_frame, mgmt_frame_len, 0); 1906 cfg80211_rx_mgmt(&vif->wdev, freq, 0, mgmt_frame, mgmt_frame_len, 0);
1907 1907
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 43fd3f402eba..48d7467d270e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -535,9 +535,6 @@ static int qcount[NUMPRIO];
535 535
536#define RETRYCHAN(chan) ((chan) == SDPCM_EVENT_CHANNEL) 536#define RETRYCHAN(chan) ((chan) == SDPCM_EVENT_CHANNEL)
537 537
538/* Retry count for register access failures */
539static const uint retry_limit = 2;
540
541/* Limit on rounding up frames */ 538/* Limit on rounding up frames */
542static const uint max_roundup = 512; 539static const uint max_roundup = 512;
543 540
@@ -3261,7 +3258,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
3261 const struct firmware *fw, 3258 const struct firmware *fw,
3262 void *nvram, u32 nvlen) 3259 void *nvram, u32 nvlen)
3263{ 3260{
3264 int bcmerror = -EFAULT; 3261 int bcmerror;
3265 u32 rstvec; 3262 u32 rstvec;
3266 3263
3267 sdio_claim_host(bus->sdiodev->func[1]); 3264 sdio_claim_host(bus->sdiodev->func[1]);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c
index 38bd5890bd53..3a03287fa912 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c
@@ -636,7 +636,7 @@ static void brcms_reg_apply_radar_flags(struct wiphy *wiphy)
636 struct ieee80211_channel *ch; 636 struct ieee80211_channel *ch;
637 int i; 637 int i;
638 638
639 sband = wiphy->bands[IEEE80211_BAND_5GHZ]; 639 sband = wiphy->bands[NL80211_BAND_5GHZ];
640 if (!sband) 640 if (!sband)
641 return; 641 return;
642 642
@@ -666,7 +666,7 @@ brcms_reg_apply_beaconing_flags(struct wiphy *wiphy,
666 const struct ieee80211_reg_rule *rule; 666 const struct ieee80211_reg_rule *rule;
667 int band, i; 667 int band, i;
668 668
669 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 669 for (band = 0; band < NUM_NL80211_BANDS; band++) {
670 sband = wiphy->bands[band]; 670 sband = wiphy->bands[band];
671 if (!sband) 671 if (!sband)
672 continue; 672 continue;
@@ -710,7 +710,7 @@ static void brcms_reg_notifier(struct wiphy *wiphy,
710 brcms_reg_apply_beaconing_flags(wiphy, request->initiator); 710 brcms_reg_apply_beaconing_flags(wiphy, request->initiator);
711 711
712 /* Disable radio if all channels disallowed by regulatory */ 712 /* Disable radio if all channels disallowed by regulatory */
713 for (band = 0; !ch_found && band < IEEE80211_NUM_BANDS; band++) { 713 for (band = 0; !ch_found && band < NUM_NL80211_BANDS; band++) {
714 sband = wiphy->bands[band]; 714 sband = wiphy->bands[band];
715 if (!sband) 715 if (!sband)
716 continue; 716 continue;
@@ -755,9 +755,9 @@ void brcms_c_regd_init(struct brcms_c_info *wlc)
755 &sup_chan); 755 &sup_chan);
756 756
757 if (band_idx == BAND_2G_INDEX) 757 if (band_idx == BAND_2G_INDEX)
758 sband = wiphy->bands[IEEE80211_BAND_2GHZ]; 758 sband = wiphy->bands[NL80211_BAND_2GHZ];
759 else 759 else
760 sband = wiphy->bands[IEEE80211_BAND_5GHZ]; 760 sband = wiphy->bands[NL80211_BAND_5GHZ];
761 761
762 for (i = 0; i < sband->n_channels; i++) { 762 for (i = 0; i < sband->n_channels; i++) {
763 ch = &sband->channels[i]; 763 ch = &sband->channels[i];
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
index 61ae2768132a..7c2a9a9bc372 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
@@ -49,7 +49,7 @@
49 FIF_PSPOLL) 49 FIF_PSPOLL)
50 50
51#define CHAN2GHZ(channel, freqency, chflags) { \ 51#define CHAN2GHZ(channel, freqency, chflags) { \
52 .band = IEEE80211_BAND_2GHZ, \ 52 .band = NL80211_BAND_2GHZ, \
53 .center_freq = (freqency), \ 53 .center_freq = (freqency), \
54 .hw_value = (channel), \ 54 .hw_value = (channel), \
55 .flags = chflags, \ 55 .flags = chflags, \
@@ -58,7 +58,7 @@
58} 58}
59 59
60#define CHAN5GHZ(channel, chflags) { \ 60#define CHAN5GHZ(channel, chflags) { \
61 .band = IEEE80211_BAND_5GHZ, \ 61 .band = NL80211_BAND_5GHZ, \
62 .center_freq = 5000 + 5*(channel), \ 62 .center_freq = 5000 + 5*(channel), \
63 .hw_value = (channel), \ 63 .hw_value = (channel), \
64 .flags = chflags, \ 64 .flags = chflags, \
@@ -217,7 +217,7 @@ static struct ieee80211_rate legacy_ratetable[] = {
217}; 217};
218 218
219static const struct ieee80211_supported_band brcms_band_2GHz_nphy_template = { 219static const struct ieee80211_supported_band brcms_band_2GHz_nphy_template = {
220 .band = IEEE80211_BAND_2GHZ, 220 .band = NL80211_BAND_2GHZ,
221 .channels = brcms_2ghz_chantable, 221 .channels = brcms_2ghz_chantable,
222 .n_channels = ARRAY_SIZE(brcms_2ghz_chantable), 222 .n_channels = ARRAY_SIZE(brcms_2ghz_chantable),
223 .bitrates = legacy_ratetable, 223 .bitrates = legacy_ratetable,
@@ -238,7 +238,7 @@ static const struct ieee80211_supported_band brcms_band_2GHz_nphy_template = {
238}; 238};
239 239
240static const struct ieee80211_supported_band brcms_band_5GHz_nphy_template = { 240static const struct ieee80211_supported_band brcms_band_5GHz_nphy_template = {
241 .band = IEEE80211_BAND_5GHZ, 241 .band = NL80211_BAND_5GHZ,
242 .channels = brcms_5ghz_nphy_chantable, 242 .channels = brcms_5ghz_nphy_chantable,
243 .n_channels = ARRAY_SIZE(brcms_5ghz_nphy_chantable), 243 .n_channels = ARRAY_SIZE(brcms_5ghz_nphy_chantable),
244 .bitrates = legacy_ratetable + BRCMS_LEGACY_5G_RATE_OFFSET, 244 .bitrates = legacy_ratetable + BRCMS_LEGACY_5G_RATE_OFFSET,
@@ -1026,8 +1026,8 @@ static int ieee_hw_rate_init(struct ieee80211_hw *hw)
1026 int has_5g = 0; 1026 int has_5g = 0;
1027 u16 phy_type; 1027 u16 phy_type;
1028 1028
1029 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = NULL; 1029 hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
1030 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = NULL; 1030 hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL;
1031 1031
1032 phy_type = brcms_c_get_phy_type(wl->wlc, 0); 1032 phy_type = brcms_c_get_phy_type(wl->wlc, 0);
1033 if (phy_type == PHY_TYPE_N || phy_type == PHY_TYPE_LCN) { 1033 if (phy_type == PHY_TYPE_N || phy_type == PHY_TYPE_LCN) {
@@ -1038,7 +1038,7 @@ static int ieee_hw_rate_init(struct ieee80211_hw *hw)
1038 band->ht_cap.mcs.rx_mask[1] = 0; 1038 band->ht_cap.mcs.rx_mask[1] = 0;
1039 band->ht_cap.mcs.rx_highest = cpu_to_le16(72); 1039 band->ht_cap.mcs.rx_highest = cpu_to_le16(72);
1040 } 1040 }
1041 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band; 1041 hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
1042 } else { 1042 } else {
1043 return -EPERM; 1043 return -EPERM;
1044 } 1044 }
@@ -1049,7 +1049,7 @@ static int ieee_hw_rate_init(struct ieee80211_hw *hw)
1049 if (phy_type == PHY_TYPE_N || phy_type == PHY_TYPE_LCN) { 1049 if (phy_type == PHY_TYPE_N || phy_type == PHY_TYPE_LCN) {
1050 band = &wlc->bandstate[BAND_5G_INDEX]->band; 1050 band = &wlc->bandstate[BAND_5G_INDEX]->band;
1051 *band = brcms_band_5GHz_nphy_template; 1051 *band = brcms_band_5GHz_nphy_template;
1052 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = band; 1052 hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
1053 } else { 1053 } else {
1054 return -EPERM; 1054 return -EPERM;
1055 } 1055 }
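
The CHAN2GHZ/CHAN5GHZ macros in brcmsmac above, like CHAN2G/CHAN5G in brcmfmac earlier, now tag each static channel entry with the nl80211 band enum. A stripped-down sketch of such a table, using placeholder values rather than the drivers' own channel lists:

#include <net/cfg80211.h>

/* Two illustrative 2.4 GHz channel entries (sketch, not driver data). */
static struct ieee80211_channel example_2g_channels[] = {
	{
		.band = NL80211_BAND_2GHZ,
		.center_freq = 2412,	/* channel 1 */
		.hw_value = 1,
	},
	{
		.band = NL80211_BAND_2GHZ,
		.center_freq = 2437,	/* channel 6 */
		.hw_value = 6,
	},
};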
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
index 218cbc8bf3a7..e16ee60639f5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
@@ -7076,7 +7076,7 @@ prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
7076 channel = BRCMS_CHAN_CHANNEL(rxh->RxChan); 7076 channel = BRCMS_CHAN_CHANNEL(rxh->RxChan);
7077 7077
7078 rx_status->band = 7078 rx_status->band =
7079 channel > 14 ? IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ; 7079 channel > 14 ? NL80211_BAND_5GHZ : NL80211_BAND_2GHZ;
7080 rx_status->freq = 7080 rx_status->freq =
7081 ieee80211_channel_to_frequency(channel, rx_status->band); 7081 ieee80211_channel_to_frequency(channel, rx_status->band);
7082 7082
@@ -7143,7 +7143,7 @@ prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
7143 * a subset of the 2.4G rates. See bitrates field 7143 * a subset of the 2.4G rates. See bitrates field
7144 * of brcms_band_5GHz_nphy (in mac80211_if.c). 7144 * of brcms_band_5GHz_nphy (in mac80211_if.c).
7145 */ 7145 */
7146 if (rx_status->band == IEEE80211_BAND_5GHZ) 7146 if (rx_status->band == NL80211_BAND_5GHZ)
7147 rx_status->rate_idx -= BRCMS_LEGACY_5G_RATE_OFFSET; 7147 rx_status->rate_idx -= BRCMS_LEGACY_5G_RATE_OFFSET;
7148 7148
7149 /* Determine short preamble and rate_idx */ 7149 /* Determine short preamble and rate_idx */
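
In prep_mac80211_status() the band is still derived from the channel number (anything above 14 is treated as 5 GHz) before being handed to ieee80211_channel_to_frequency(); only the enum spelling changes. A small sketch of that mapping, with demo_chan_to_freq() as a hypothetical helper name:

#include <net/cfg80211.h>

/* Hypothetical helper mirroring the hunk above: pick the band from the
 * channel number, then let cfg80211 compute the frequency in MHz. */
static int demo_chan_to_freq(int channel, enum nl80211_band *band)
{
        *band = channel > 14 ? NL80211_BAND_5GHZ : NL80211_BAND_2GHZ;
        return ieee80211_channel_to_frequency(channel, *band);
}
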
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
index d2353f6e5214..4bd9e2b97e86 100644
--- a/drivers/net/wireless/cisco/airo.c
+++ b/drivers/net/wireless/cisco/airo.c
@@ -5836,7 +5836,7 @@ static int airo_get_freq(struct net_device *dev,
5836 ch = le16_to_cpu(status_rid.channel); 5836 ch = le16_to_cpu(status_rid.channel);
5837 if((ch > 0) && (ch < 15)) { 5837 if((ch > 0) && (ch < 15)) {
5838 fwrq->m = 100000 * 5838 fwrq->m = 100000 *
5839 ieee80211_channel_to_frequency(ch, IEEE80211_BAND_2GHZ); 5839 ieee80211_channel_to_frequency(ch, NL80211_BAND_2GHZ);
5840 fwrq->e = 1; 5840 fwrq->e = 1;
5841 } else { 5841 } else {
5842 fwrq->m = ch; 5842 fwrq->m = ch;
@@ -6894,7 +6894,7 @@ static int airo_get_range(struct net_device *dev,
6894 for(i = 0; i < 14; i++) { 6894 for(i = 0; i < 14; i++) {
6895 range->freq[k].i = i + 1; /* List index */ 6895 range->freq[k].i = i + 1; /* List index */
6896 range->freq[k].m = 100000 * 6896 range->freq[k].m = 100000 *
6897 ieee80211_channel_to_frequency(i + 1, IEEE80211_BAND_2GHZ); 6897 ieee80211_channel_to_frequency(i + 1, NL80211_BAND_2GHZ);
6898 range->freq[k++].e = 1; /* Values in MHz -> * 10^5 * 10 */ 6898 range->freq[k++].e = 1; /* Values in MHz -> * 10^5 * 10 */
6899 } 6899 }
6900 range->num_frequency = k; 6900 range->num_frequency = k;
@@ -7302,7 +7302,7 @@ static inline char *airo_translate_scan(struct net_device *dev,
7302 iwe.cmd = SIOCGIWFREQ; 7302 iwe.cmd = SIOCGIWFREQ;
7303 iwe.u.freq.m = le16_to_cpu(bss->dsChannel); 7303 iwe.u.freq.m = le16_to_cpu(bss->dsChannel);
7304 iwe.u.freq.m = 100000 * 7304 iwe.u.freq.m = 100000 *
7305 ieee80211_channel_to_frequency(iwe.u.freq.m, IEEE80211_BAND_2GHZ); 7305 ieee80211_channel_to_frequency(iwe.u.freq.m, NL80211_BAND_2GHZ);
7306 iwe.u.freq.e = 1; 7306 iwe.u.freq.e = 1;
7307 current_ev = iwe_stream_add_event(info, current_ev, end_buf, 7307 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
7308 &iwe, IW_EV_FREQ_LEN); 7308 &iwe, IW_EV_FREQ_LEN);
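
The airo.c changes only touch the band argument of ieee80211_channel_to_frequency(); the surrounding wireless-extensions encoding is untouched. That function returns a value in MHz, and the driver stores m = 100000 * MHz with e = 1, which under the iw_freq convention (value = m * 10^e) yields the frequency in Hz. A sketch of the encoding, assuming the standard struct iw_freq:

#include <linux/wireless.h>
#include <net/cfg80211.h>

/* Encode a 2.4 GHz channel as an iw_freq in Hz: m * 10^e,
 * here 100000 * MHz * 10^1. */
static void demo_encode_freq(struct iw_freq *fwrq, int channel)
{
        fwrq->m = 100000 *
                  ieee80211_channel_to_frequency(channel, NL80211_BAND_2GHZ);
        fwrq->e = 1;
}
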
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
index f93a7f71c047..e1e42ed6c412 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -1913,7 +1913,7 @@ static int ipw2100_wdev_init(struct net_device *dev)
1913 if (geo->bg_channels) { 1913 if (geo->bg_channels) {
1914 struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band; 1914 struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band;
1915 1915
1916 bg_band->band = IEEE80211_BAND_2GHZ; 1916 bg_band->band = NL80211_BAND_2GHZ;
1917 bg_band->n_channels = geo->bg_channels; 1917 bg_band->n_channels = geo->bg_channels;
1918 bg_band->channels = kcalloc(geo->bg_channels, 1918 bg_band->channels = kcalloc(geo->bg_channels,
1919 sizeof(struct ieee80211_channel), 1919 sizeof(struct ieee80211_channel),
@@ -1924,7 +1924,7 @@ static int ipw2100_wdev_init(struct net_device *dev)
1924 } 1924 }
1925 /* translate geo->bg to bg_band.channels */ 1925 /* translate geo->bg to bg_band.channels */
1926 for (i = 0; i < geo->bg_channels; i++) { 1926 for (i = 0; i < geo->bg_channels; i++) {
1927 bg_band->channels[i].band = IEEE80211_BAND_2GHZ; 1927 bg_band->channels[i].band = NL80211_BAND_2GHZ;
1928 bg_band->channels[i].center_freq = geo->bg[i].freq; 1928 bg_band->channels[i].center_freq = geo->bg[i].freq;
1929 bg_band->channels[i].hw_value = geo->bg[i].channel; 1929 bg_band->channels[i].hw_value = geo->bg[i].channel;
1930 bg_band->channels[i].max_power = geo->bg[i].max_power; 1930 bg_band->channels[i].max_power = geo->bg[i].max_power;
@@ -1945,7 +1945,7 @@ static int ipw2100_wdev_init(struct net_device *dev)
1945 bg_band->bitrates = ipw2100_bg_rates; 1945 bg_band->bitrates = ipw2100_bg_rates;
1946 bg_band->n_bitrates = RATE_COUNT; 1946 bg_band->n_bitrates = RATE_COUNT;
1947 1947
1948 wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band; 1948 wdev->wiphy->bands[NL80211_BAND_2GHZ] = bg_band;
1949 } 1949 }
1950 1950
1951 wdev->wiphy->cipher_suites = ipw_cipher_suites; 1951 wdev->wiphy->cipher_suites = ipw_cipher_suites;
@@ -3521,7 +3521,7 @@ static void ipw2100_msg_free(struct ipw2100_priv *priv)
3521static ssize_t show_pci(struct device *d, struct device_attribute *attr, 3521static ssize_t show_pci(struct device *d, struct device_attribute *attr,
3522 char *buf) 3522 char *buf)
3523{ 3523{
3524 struct pci_dev *pci_dev = container_of(d, struct pci_dev, dev); 3524 struct pci_dev *pci_dev = to_pci_dev(d);
3525 char *out = buf; 3525 char *out = buf;
3526 int i, j; 3526 int i, j;
3527 u32 val; 3527 u32 val;
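
Besides the band rename, the ipw2100.c hunks replace an open-coded container_of() with the to_pci_dev() helper. The two are equivalent: to_pci_dev() is itself defined as container_of() on the struct device embedded in struct pci_dev, so this is a readability cleanup with no behavioural change. A sketch with a hypothetical sysfs show callback:

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/pci.h>

/* Hypothetical attribute callback: recover the enclosing pci_dev from
 * its embedded struct device via to_pci_dev(), i.e.
 * container_of(d, struct pci_dev, dev). */
static ssize_t demo_show(struct device *d, struct device_attribute *attr,
                         char *buf)
{
        struct pci_dev *pdev = to_pci_dev(d);

        return scnprintf(buf, PAGE_SIZE, "%04x:%04x\n",
                         pdev->vendor, pdev->device);
}
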
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index ed0adaf1eec4..dac13cf42e9f 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -11359,7 +11359,7 @@ static int ipw_wdev_init(struct net_device *dev)
11359 if (geo->bg_channels) { 11359 if (geo->bg_channels) {
11360 struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band; 11360 struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band;
11361 11361
11362 bg_band->band = IEEE80211_BAND_2GHZ; 11362 bg_band->band = NL80211_BAND_2GHZ;
11363 bg_band->n_channels = geo->bg_channels; 11363 bg_band->n_channels = geo->bg_channels;
11364 bg_band->channels = kcalloc(geo->bg_channels, 11364 bg_band->channels = kcalloc(geo->bg_channels,
11365 sizeof(struct ieee80211_channel), 11365 sizeof(struct ieee80211_channel),
@@ -11370,7 +11370,7 @@ static int ipw_wdev_init(struct net_device *dev)
11370 } 11370 }
11371 /* translate geo->bg to bg_band.channels */ 11371 /* translate geo->bg to bg_band.channels */
11372 for (i = 0; i < geo->bg_channels; i++) { 11372 for (i = 0; i < geo->bg_channels; i++) {
11373 bg_band->channels[i].band = IEEE80211_BAND_2GHZ; 11373 bg_band->channels[i].band = NL80211_BAND_2GHZ;
11374 bg_band->channels[i].center_freq = geo->bg[i].freq; 11374 bg_band->channels[i].center_freq = geo->bg[i].freq;
11375 bg_band->channels[i].hw_value = geo->bg[i].channel; 11375 bg_band->channels[i].hw_value = geo->bg[i].channel;
11376 bg_band->channels[i].max_power = geo->bg[i].max_power; 11376 bg_band->channels[i].max_power = geo->bg[i].max_power;
@@ -11391,14 +11391,14 @@ static int ipw_wdev_init(struct net_device *dev)
11391 bg_band->bitrates = ipw2200_bg_rates; 11391 bg_band->bitrates = ipw2200_bg_rates;
11392 bg_band->n_bitrates = ipw2200_num_bg_rates; 11392 bg_band->n_bitrates = ipw2200_num_bg_rates;
11393 11393
11394 wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band; 11394 wdev->wiphy->bands[NL80211_BAND_2GHZ] = bg_band;
11395 } 11395 }
11396 11396
11397 /* fill-out priv->ieee->a_band */ 11397 /* fill-out priv->ieee->a_band */
11398 if (geo->a_channels) { 11398 if (geo->a_channels) {
11399 struct ieee80211_supported_band *a_band = &priv->ieee->a_band; 11399 struct ieee80211_supported_band *a_band = &priv->ieee->a_band;
11400 11400
11401 a_band->band = IEEE80211_BAND_5GHZ; 11401 a_band->band = NL80211_BAND_5GHZ;
11402 a_band->n_channels = geo->a_channels; 11402 a_band->n_channels = geo->a_channels;
11403 a_band->channels = kcalloc(geo->a_channels, 11403 a_band->channels = kcalloc(geo->a_channels,
11404 sizeof(struct ieee80211_channel), 11404 sizeof(struct ieee80211_channel),
@@ -11409,7 +11409,7 @@ static int ipw_wdev_init(struct net_device *dev)
11409 } 11409 }
11410 /* translate geo->a to a_band.channels */ 11410 /* translate geo->a to a_band.channels */
11411 for (i = 0; i < geo->a_channels; i++) { 11411 for (i = 0; i < geo->a_channels; i++) {
11412 a_band->channels[i].band = IEEE80211_BAND_5GHZ; 11412 a_band->channels[i].band = NL80211_BAND_5GHZ;
11413 a_band->channels[i].center_freq = geo->a[i].freq; 11413 a_band->channels[i].center_freq = geo->a[i].freq;
11414 a_band->channels[i].hw_value = geo->a[i].channel; 11414 a_band->channels[i].hw_value = geo->a[i].channel;
11415 a_band->channels[i].max_power = geo->a[i].max_power; 11415 a_band->channels[i].max_power = geo->a[i].max_power;
@@ -11430,7 +11430,7 @@ static int ipw_wdev_init(struct net_device *dev)
11430 a_band->bitrates = ipw2200_a_rates; 11430 a_band->bitrates = ipw2200_a_rates;
11431 a_band->n_bitrates = ipw2200_num_a_rates; 11431 a_band->n_bitrates = ipw2200_num_a_rates;
11432 11432
11433 wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = a_band; 11433 wdev->wiphy->bands[NL80211_BAND_5GHZ] = a_band;
11434 } 11434 }
11435 11435
11436 wdev->wiphy->cipher_suites = ipw_cipher_suites; 11436 wdev->wiphy->cipher_suites = ipw_cipher_suites;
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index af1b3e6839fa..466912eb2d87 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -1547,7 +1547,7 @@ il3945_irq_tasklet(struct il_priv *il)
1547} 1547}
1548 1548
1549static int 1549static int
1550il3945_get_channels_for_scan(struct il_priv *il, enum ieee80211_band band, 1550il3945_get_channels_for_scan(struct il_priv *il, enum nl80211_band band,
1551 u8 is_active, u8 n_probes, 1551 u8 is_active, u8 n_probes,
1552 struct il3945_scan_channel *scan_ch, 1552 struct il3945_scan_channel *scan_ch,
1553 struct ieee80211_vif *vif) 1553 struct ieee80211_vif *vif)
@@ -1618,7 +1618,7 @@ il3945_get_channels_for_scan(struct il_priv *il, enum ieee80211_band band,
1618 /* scan_pwr_info->tpc.dsp_atten; */ 1618 /* scan_pwr_info->tpc.dsp_atten; */
1619 1619
1620 /*scan_pwr_info->tpc.tx_gain; */ 1620 /*scan_pwr_info->tpc.tx_gain; */
1621 if (band == IEEE80211_BAND_5GHZ) 1621 if (band == NL80211_BAND_5GHZ)
1622 scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3; 1622 scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
1623 else { 1623 else {
1624 scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3)); 1624 scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
@@ -2534,7 +2534,7 @@ il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
2534 }; 2534 };
2535 struct il3945_scan_cmd *scan; 2535 struct il3945_scan_cmd *scan;
2536 u8 n_probes = 0; 2536 u8 n_probes = 0;
2537 enum ieee80211_band band; 2537 enum nl80211_band band;
2538 bool is_active = false; 2538 bool is_active = false;
2539 int ret; 2539 int ret;
2540 u16 len; 2540 u16 len;
@@ -2615,14 +2615,14 @@ il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
2615 /* flags + rate selection */ 2615 /* flags + rate selection */
2616 2616
2617 switch (il->scan_band) { 2617 switch (il->scan_band) {
2618 case IEEE80211_BAND_2GHZ: 2618 case NL80211_BAND_2GHZ:
2619 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; 2619 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
2620 scan->tx_cmd.rate = RATE_1M_PLCP; 2620 scan->tx_cmd.rate = RATE_1M_PLCP;
2621 band = IEEE80211_BAND_2GHZ; 2621 band = NL80211_BAND_2GHZ;
2622 break; 2622 break;
2623 case IEEE80211_BAND_5GHZ: 2623 case NL80211_BAND_5GHZ:
2624 scan->tx_cmd.rate = RATE_6M_PLCP; 2624 scan->tx_cmd.rate = RATE_6M_PLCP;
2625 band = IEEE80211_BAND_5GHZ; 2625 band = NL80211_BAND_5GHZ;
2626 break; 2626 break;
2627 default: 2627 default:
2628 IL_WARN("Invalid scan band\n"); 2628 IL_WARN("Invalid scan band\n");
@@ -3507,7 +3507,7 @@ il3945_init_drv(struct il_priv *il)
3507 3507
3508 il->ieee_channels = NULL; 3508 il->ieee_channels = NULL;
3509 il->ieee_rates = NULL; 3509 il->ieee_rates = NULL;
3510 il->band = IEEE80211_BAND_2GHZ; 3510 il->band = NL80211_BAND_2GHZ;
3511 3511
3512 il->iw_mode = NL80211_IFTYPE_STATION; 3512 il->iw_mode = NL80211_IFTYPE_STATION;
3513 il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF; 3513 il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;
@@ -3582,13 +3582,13 @@ il3945_setup_mac(struct il_priv *il)
3582 /* Default value; 4 EDCA QOS priorities */ 3582 /* Default value; 4 EDCA QOS priorities */
3583 hw->queues = 4; 3583 hw->queues = 4;
3584 3584
3585 if (il->bands[IEEE80211_BAND_2GHZ].n_channels) 3585 if (il->bands[NL80211_BAND_2GHZ].n_channels)
3586 il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = 3586 il->hw->wiphy->bands[NL80211_BAND_2GHZ] =
3587 &il->bands[IEEE80211_BAND_2GHZ]; 3587 &il->bands[NL80211_BAND_2GHZ];
3588 3588
3589 if (il->bands[IEEE80211_BAND_5GHZ].n_channels) 3589 if (il->bands[NL80211_BAND_5GHZ].n_channels)
3590 il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = 3590 il->hw->wiphy->bands[NL80211_BAND_5GHZ] =
3591 &il->bands[IEEE80211_BAND_5GHZ]; 3591 &il->bands[NL80211_BAND_5GHZ];
3592 3592
3593 il_leds_init(il); 3593 il_leds_init(il);
3594 3594
@@ -3761,7 +3761,7 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3761 goto out_release_irq; 3761 goto out_release_irq;
3762 } 3762 }
3763 3763
3764 il_set_rxon_channel(il, &il->bands[IEEE80211_BAND_2GHZ].channels[5]); 3764 il_set_rxon_channel(il, &il->bands[NL80211_BAND_2GHZ].channels[5]);
3765 il3945_setup_deferred_work(il); 3765 il3945_setup_deferred_work(il);
3766 il3945_setup_handlers(il); 3766 il3945_setup_handlers(il);
3767 il_power_initialize(il); 3767 il_power_initialize(il);
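
The il3945_request_scan() hunk shows the shape most of these drivers share after the rename: a switch on the nl80211 band selects the scan flags and base TX rate, and anything else falls through to a warning or error path. A minimal sketch of that shape; the PLCP values below are placeholders, not the driver's constants:

#include <linux/types.h>
#include <linux/errno.h>
#include <net/cfg80211.h>

/* Illustrative band switch for choosing a scan rate; 0x0a/0x0d stand
 * in for driver-specific CCK/OFDM PLCP codes. */
static int demo_pick_scan_rate(enum nl80211_band band, u8 *rate)
{
        switch (band) {
        case NL80211_BAND_2GHZ:
                *rate = 0x0a;           /* lowest CCK rate */
                break;
        case NL80211_BAND_5GHZ:
                *rate = 0x0d;           /* lowest OFDM rate */
                break;
        default:
                return -EINVAL;         /* invalid scan band */
        }
        return 0;
}
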
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-rs.c b/drivers/net/wireless/intel/iwlegacy/3945-rs.c
index 76b0729ade17..03ad9b8b55f4 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-rs.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-rs.c
@@ -97,7 +97,7 @@ static struct il3945_tpt_entry il3945_tpt_table_g[] = {
97#define RATE_RETRY_TH 15 97#define RATE_RETRY_TH 15
98 98
99static u8 99static u8
100il3945_get_rate_idx_by_rssi(s32 rssi, enum ieee80211_band band) 100il3945_get_rate_idx_by_rssi(s32 rssi, enum nl80211_band band)
101{ 101{
102 u32 idx = 0; 102 u32 idx = 0;
103 u32 table_size = 0; 103 u32 table_size = 0;
@@ -107,11 +107,11 @@ il3945_get_rate_idx_by_rssi(s32 rssi, enum ieee80211_band band)
107 rssi = IL_MIN_RSSI_VAL; 107 rssi = IL_MIN_RSSI_VAL;
108 108
109 switch (band) { 109 switch (band) {
110 case IEEE80211_BAND_2GHZ: 110 case NL80211_BAND_2GHZ:
111 tpt_table = il3945_tpt_table_g; 111 tpt_table = il3945_tpt_table_g;
112 table_size = ARRAY_SIZE(il3945_tpt_table_g); 112 table_size = ARRAY_SIZE(il3945_tpt_table_g);
113 break; 113 break;
114 case IEEE80211_BAND_5GHZ: 114 case NL80211_BAND_5GHZ:
115 tpt_table = il3945_tpt_table_a; 115 tpt_table = il3945_tpt_table_a;
116 table_size = ARRAY_SIZE(il3945_tpt_table_a); 116 table_size = ARRAY_SIZE(il3945_tpt_table_a);
117 break; 117 break;
@@ -380,7 +380,7 @@ il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
380 380
381 il->_3945.sta_supp_rates = sta->supp_rates[sband->band]; 381 il->_3945.sta_supp_rates = sta->supp_rates[sband->band];
382 /* For 5 GHz band it start at IL_FIRST_OFDM_RATE */ 382 /* For 5 GHz band it start at IL_FIRST_OFDM_RATE */
383 if (sband->band == IEEE80211_BAND_5GHZ) { 383 if (sband->band == NL80211_BAND_5GHZ) {
384 rs_sta->last_txrate_idx += IL_FIRST_OFDM_RATE; 384 rs_sta->last_txrate_idx += IL_FIRST_OFDM_RATE;
385 il->_3945.sta_supp_rates <<= IL_FIRST_OFDM_RATE; 385 il->_3945.sta_supp_rates <<= IL_FIRST_OFDM_RATE;
386 } 386 }
@@ -541,7 +541,7 @@ il3945_rs_tx_status(void *il_rate, struct ieee80211_supported_band *sband,
541 541
542static u16 542static u16
543il3945_get_adjacent_rate(struct il3945_rs_sta *rs_sta, u8 idx, u16 rate_mask, 543il3945_get_adjacent_rate(struct il3945_rs_sta *rs_sta, u8 idx, u16 rate_mask,
544 enum ieee80211_band band) 544 enum nl80211_band band)
545{ 545{
546 u8 high = RATE_INVALID; 546 u8 high = RATE_INVALID;
547 u8 low = RATE_INVALID; 547 u8 low = RATE_INVALID;
@@ -549,7 +549,7 @@ il3945_get_adjacent_rate(struct il3945_rs_sta *rs_sta, u8 idx, u16 rate_mask,
549 549
550 /* 802.11A walks to the next literal adjacent rate in 550 /* 802.11A walks to the next literal adjacent rate in
551 * the rate table */ 551 * the rate table */
552 if (unlikely(band == IEEE80211_BAND_5GHZ)) { 552 if (unlikely(band == NL80211_BAND_5GHZ)) {
553 int i; 553 int i;
554 u32 mask; 554 u32 mask;
555 555
@@ -657,14 +657,14 @@ il3945_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
657 657
658 /* get user max rate if set */ 658 /* get user max rate if set */
659 max_rate_idx = txrc->max_rate_idx; 659 max_rate_idx = txrc->max_rate_idx;
660 if (sband->band == IEEE80211_BAND_5GHZ && max_rate_idx != -1) 660 if (sband->band == NL80211_BAND_5GHZ && max_rate_idx != -1)
661 max_rate_idx += IL_FIRST_OFDM_RATE; 661 max_rate_idx += IL_FIRST_OFDM_RATE;
662 if (max_rate_idx < 0 || max_rate_idx >= RATE_COUNT) 662 if (max_rate_idx < 0 || max_rate_idx >= RATE_COUNT)
663 max_rate_idx = -1; 663 max_rate_idx = -1;
664 664
665 idx = min(rs_sta->last_txrate_idx & 0xffff, RATE_COUNT_3945 - 1); 665 idx = min(rs_sta->last_txrate_idx & 0xffff, RATE_COUNT_3945 - 1);
666 666
667 if (sband->band == IEEE80211_BAND_5GHZ) 667 if (sband->band == NL80211_BAND_5GHZ)
668 rate_mask = rate_mask << IL_FIRST_OFDM_RATE; 668 rate_mask = rate_mask << IL_FIRST_OFDM_RATE;
669 669
670 spin_lock_irqsave(&rs_sta->lock, flags); 670 spin_lock_irqsave(&rs_sta->lock, flags);
@@ -806,7 +806,7 @@ il3945_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
806 806
807out: 807out:
808 808
809 if (sband->band == IEEE80211_BAND_5GHZ) { 809 if (sband->band == NL80211_BAND_5GHZ) {
810 if (WARN_ON_ONCE(idx < IL_FIRST_OFDM_RATE)) 810 if (WARN_ON_ONCE(idx < IL_FIRST_OFDM_RATE))
811 idx = IL_FIRST_OFDM_RATE; 811 idx = IL_FIRST_OFDM_RATE;
812 rs_sta->last_txrate_idx = idx; 812 rs_sta->last_txrate_idx = idx;
@@ -935,7 +935,7 @@ il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
935 935
936 rs_sta->tgg = 0; 936 rs_sta->tgg = 0;
937 switch (il->band) { 937 switch (il->band) {
938 case IEEE80211_BAND_2GHZ: 938 case NL80211_BAND_2GHZ:
939 /* TODO: this always does G, not a regression */ 939 /* TODO: this always does G, not a regression */
940 if (il->active.flags & RXON_FLG_TGG_PROTECT_MSK) { 940 if (il->active.flags & RXON_FLG_TGG_PROTECT_MSK) {
941 rs_sta->tgg = 1; 941 rs_sta->tgg = 1;
@@ -943,7 +943,7 @@ il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
943 } else 943 } else
944 rs_sta->expected_tpt = il3945_expected_tpt_g; 944 rs_sta->expected_tpt = il3945_expected_tpt_g;
945 break; 945 break;
946 case IEEE80211_BAND_5GHZ: 946 case NL80211_BAND_5GHZ:
947 rs_sta->expected_tpt = il3945_expected_tpt_a; 947 rs_sta->expected_tpt = il3945_expected_tpt_a;
948 break; 948 break;
949 default: 949 default:
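
Most of the 3945-rs.c hunks revolve around the same detail: the 5 GHz band carries no CCK rates, so whenever the station is on NL80211_BAND_5GHZ the legacy rate index and the supported-rate mask are shifted by IL_FIRST_OFDM_RATE. A sketch of that offset handling, with DEMO_FIRST_OFDM_RATE standing in for the driver's constant:

#include <net/cfg80211.h>

#define DEMO_FIRST_OFDM_RATE    4       /* assumed index of the first OFDM rate */

/* Map a band-local rate index to a slot in a legacy rate table that
 * lists CCK rates first: on 5 GHz the CCK entries are skipped. */
static int demo_rate_to_table_idx(int rate_idx, enum nl80211_band band)
{
        if (band == NL80211_BAND_5GHZ)
                rate_idx += DEMO_FIRST_OFDM_RATE;
        return rate_idx;
}
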
diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c
index 93bdf684babe..7bcedbb53d94 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945.c
@@ -255,13 +255,13 @@ il3945_rs_next_rate(struct il_priv *il, int rate)
255 int next_rate = il3945_get_prev_ieee_rate(rate); 255 int next_rate = il3945_get_prev_ieee_rate(rate);
256 256
257 switch (il->band) { 257 switch (il->band) {
258 case IEEE80211_BAND_5GHZ: 258 case NL80211_BAND_5GHZ:
259 if (rate == RATE_12M_IDX) 259 if (rate == RATE_12M_IDX)
260 next_rate = RATE_9M_IDX; 260 next_rate = RATE_9M_IDX;
261 else if (rate == RATE_6M_IDX) 261 else if (rate == RATE_6M_IDX)
262 next_rate = RATE_6M_IDX; 262 next_rate = RATE_6M_IDX;
263 break; 263 break;
264 case IEEE80211_BAND_2GHZ: 264 case NL80211_BAND_2GHZ:
265 if (!(il->_3945.sta_supp_rates & IL_OFDM_RATES_MASK) && 265 if (!(il->_3945.sta_supp_rates & IL_OFDM_RATES_MASK) &&
266 il_is_associated(il)) { 266 il_is_associated(il)) {
267 if (rate == RATE_11M_IDX) 267 if (rate == RATE_11M_IDX)
@@ -349,7 +349,7 @@ il3945_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
349 349
350 /* Fill the MRR chain with some info about on-chip retransmissions */ 350 /* Fill the MRR chain with some info about on-chip retransmissions */
351 rate_idx = il3945_hwrate_to_plcp_idx(tx_resp->rate); 351 rate_idx = il3945_hwrate_to_plcp_idx(tx_resp->rate);
352 if (info->band == IEEE80211_BAND_5GHZ) 352 if (info->band == NL80211_BAND_5GHZ)
353 rate_idx -= IL_FIRST_OFDM_RATE; 353 rate_idx -= IL_FIRST_OFDM_RATE;
354 354
355 fail = tx_resp->failure_frame; 355 fail = tx_resp->failure_frame;
@@ -554,14 +554,14 @@ il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
554 rx_status.mactime = le64_to_cpu(rx_end->timestamp); 554 rx_status.mactime = le64_to_cpu(rx_end->timestamp);
555 rx_status.band = 555 rx_status.band =
556 (rx_hdr-> 556 (rx_hdr->
557 phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? IEEE80211_BAND_2GHZ : 557 phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? NL80211_BAND_2GHZ :
558 IEEE80211_BAND_5GHZ; 558 NL80211_BAND_5GHZ;
559 rx_status.freq = 559 rx_status.freq =
560 ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel), 560 ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel),
561 rx_status.band); 561 rx_status.band);
562 562
563 rx_status.rate_idx = il3945_hwrate_to_plcp_idx(rx_hdr->rate); 563 rx_status.rate_idx = il3945_hwrate_to_plcp_idx(rx_hdr->rate);
564 if (rx_status.band == IEEE80211_BAND_5GHZ) 564 if (rx_status.band == NL80211_BAND_5GHZ)
565 rx_status.rate_idx -= IL_FIRST_OFDM_RATE; 565 rx_status.rate_idx -= IL_FIRST_OFDM_RATE;
566 566
567 rx_status.antenna = 567 rx_status.antenna =
@@ -1409,7 +1409,7 @@ il3945_send_tx_power(struct il_priv *il)
1409 1409
1410 chan = le16_to_cpu(il->active.channel); 1410 chan = le16_to_cpu(il->active.channel);
1411 1411
1412 txpower.band = (il->band == IEEE80211_BAND_5GHZ) ? 0 : 1; 1412 txpower.band = (il->band == NL80211_BAND_5GHZ) ? 0 : 1;
1413 ch_info = il_get_channel_info(il, il->band, chan); 1413 ch_info = il_get_channel_info(il, il->band, chan);
1414 if (!ch_info) { 1414 if (!ch_info) {
1415 IL_ERR("Failed to get channel info for channel %d [%d]\n", chan, 1415 IL_ERR("Failed to get channel info for channel %d [%d]\n", chan,
@@ -2310,7 +2310,7 @@ il3945_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
2310 2310
2311 il3945_sync_sta(il, vif_priv->ibss_bssid_sta_id, 2311 il3945_sync_sta(il, vif_priv->ibss_bssid_sta_id,
2312 (il->band == 2312 (il->band ==
2313 IEEE80211_BAND_5GHZ) ? RATE_6M_PLCP : 2313 NL80211_BAND_5GHZ) ? RATE_6M_PLCP :
2314 RATE_1M_PLCP); 2314 RATE_1M_PLCP);
2315 il3945_rate_scale_init(il->hw, vif_priv->ibss_bssid_sta_id); 2315 il3945_rate_scale_init(il->hw, vif_priv->ibss_bssid_sta_id);
2316 2316
@@ -2343,7 +2343,7 @@ il3945_init_hw_rate_table(struct il_priv *il)
2343 } 2343 }
2344 2344
2345 switch (il->band) { 2345 switch (il->band) {
2346 case IEEE80211_BAND_5GHZ: 2346 case NL80211_BAND_5GHZ:
2347 D_RATE("Select A mode rate scale\n"); 2347 D_RATE("Select A mode rate scale\n");
2348 /* If one of the following CCK rates is used, 2348 /* If one of the following CCK rates is used,
2349 * have it fall back to the 6M OFDM rate */ 2349 * have it fall back to the 6M OFDM rate */
@@ -2359,7 +2359,7 @@ il3945_init_hw_rate_table(struct il_priv *il)
2359 il3945_rates[IL_FIRST_OFDM_RATE].table_rs_idx; 2359 il3945_rates[IL_FIRST_OFDM_RATE].table_rs_idx;
2360 break; 2360 break;
2361 2361
2362 case IEEE80211_BAND_2GHZ: 2362 case NL80211_BAND_2GHZ:
2363 D_RATE("Select B/G mode rate scale\n"); 2363 D_RATE("Select B/G mode rate scale\n");
2364 /* If an OFDM rate is used, have it fall back to the 2364 /* If an OFDM rate is used, have it fall back to the
2365 * 1M CCK rates */ 2365 * 1M CCK rates */
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index b75f4ef3cdc7..a91d170a614b 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -457,7 +457,7 @@ il4965_rxq_stop(struct il_priv *il)
457} 457}
458 458
459int 459int
460il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band) 460il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band)
461{ 461{
462 int idx = 0; 462 int idx = 0;
463 int band_offset = 0; 463 int band_offset = 0;
@@ -468,7 +468,7 @@ il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
468 return idx; 468 return idx;
469 /* Legacy rate format, search for match in table */ 469 /* Legacy rate format, search for match in table */
470 } else { 470 } else {
471 if (band == IEEE80211_BAND_5GHZ) 471 if (band == NL80211_BAND_5GHZ)
472 band_offset = IL_FIRST_OFDM_RATE; 472 band_offset = IL_FIRST_OFDM_RATE;
473 for (idx = band_offset; idx < RATE_COUNT_LEGACY; idx++) 473 for (idx = band_offset; idx < RATE_COUNT_LEGACY; idx++)
474 if (il_rates[idx].plcp == (rate_n_flags & 0xFF)) 474 if (il_rates[idx].plcp == (rate_n_flags & 0xFF))
@@ -688,8 +688,8 @@ il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
688 rx_status.mactime = le64_to_cpu(phy_res->timestamp); 688 rx_status.mactime = le64_to_cpu(phy_res->timestamp);
689 rx_status.band = 689 rx_status.band =
690 (phy_res-> 690 (phy_res->
691 phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? IEEE80211_BAND_2GHZ : 691 phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? NL80211_BAND_2GHZ :
692 IEEE80211_BAND_5GHZ; 692 NL80211_BAND_5GHZ;
693 rx_status.freq = 693 rx_status.freq =
694 ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel), 694 ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
695 rx_status.band); 695 rx_status.band);
@@ -766,7 +766,7 @@ il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb)
766 766
767static int 767static int
768il4965_get_channels_for_scan(struct il_priv *il, struct ieee80211_vif *vif, 768il4965_get_channels_for_scan(struct il_priv *il, struct ieee80211_vif *vif,
769 enum ieee80211_band band, u8 is_active, 769 enum nl80211_band band, u8 is_active,
770 u8 n_probes, struct il_scan_channel *scan_ch) 770 u8 n_probes, struct il_scan_channel *scan_ch)
771{ 771{
772 struct ieee80211_channel *chan; 772 struct ieee80211_channel *chan;
@@ -822,7 +822,7 @@ il4965_get_channels_for_scan(struct il_priv *il, struct ieee80211_vif *vif,
822 * power level: 822 * power level:
823 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3; 823 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
824 */ 824 */
825 if (band == IEEE80211_BAND_5GHZ) 825 if (band == NL80211_BAND_5GHZ)
826 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3; 826 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
827 else 827 else
828 scan_ch->tx_gain = ((1 << 5) | (5 << 3)); 828 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
@@ -870,7 +870,7 @@ il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
870 u32 rate_flags = 0; 870 u32 rate_flags = 0;
871 u16 cmd_len; 871 u16 cmd_len;
872 u16 rx_chain = 0; 872 u16 rx_chain = 0;
873 enum ieee80211_band band; 873 enum nl80211_band band;
874 u8 n_probes = 0; 874 u8 n_probes = 0;
875 u8 rx_ant = il->hw_params.valid_rx_ant; 875 u8 rx_ant = il->hw_params.valid_rx_ant;
876 u8 rate; 876 u8 rate;
@@ -944,7 +944,7 @@ il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
944 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; 944 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
945 945
946 switch (il->scan_band) { 946 switch (il->scan_band) {
947 case IEEE80211_BAND_2GHZ: 947 case NL80211_BAND_2GHZ:
948 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; 948 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
949 chan_mod = 949 chan_mod =
950 le32_to_cpu(il->active.flags & RXON_FLG_CHANNEL_MODE_MSK) >> 950 le32_to_cpu(il->active.flags & RXON_FLG_CHANNEL_MODE_MSK) >>
@@ -956,7 +956,7 @@ il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
956 rate_flags = RATE_MCS_CCK_MSK; 956 rate_flags = RATE_MCS_CCK_MSK;
957 } 957 }
958 break; 958 break;
959 case IEEE80211_BAND_5GHZ: 959 case NL80211_BAND_5GHZ:
960 rate = RATE_6M_PLCP; 960 rate = RATE_6M_PLCP;
961 break; 961 break;
962 default: 962 default:
@@ -1590,7 +1590,7 @@ il4965_tx_cmd_build_rate(struct il_priv *il,
1590 || rate_idx > RATE_COUNT_LEGACY) 1590 || rate_idx > RATE_COUNT_LEGACY)
1591 rate_idx = rate_lowest_index(&il->bands[info->band], sta); 1591 rate_idx = rate_lowest_index(&il->bands[info->band], sta);
1592 /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ 1592 /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
1593 if (info->band == IEEE80211_BAND_5GHZ) 1593 if (info->band == NL80211_BAND_5GHZ)
1594 rate_idx += IL_FIRST_OFDM_RATE; 1594 rate_idx += IL_FIRST_OFDM_RATE;
1595 /* Get PLCP rate for tx_cmd->rate_n_flags */ 1595 /* Get PLCP rate for tx_cmd->rate_n_flags */
1596 rate_plcp = il_rates[rate_idx].plcp; 1596 rate_plcp = il_rates[rate_idx].plcp;
@@ -3051,7 +3051,7 @@ il4965_sta_alloc_lq(struct il_priv *il, u8 sta_id)
3051 } 3051 }
3052 /* Set up the rate scaling to start at selected rate, fall back 3052 /* Set up the rate scaling to start at selected rate, fall back
3053 * all the way down to 1M in IEEE order, and then spin on 1M */ 3053 * all the way down to 1M in IEEE order, and then spin on 1M */
3054 if (il->band == IEEE80211_BAND_5GHZ) 3054 if (il->band == NL80211_BAND_5GHZ)
3055 r = RATE_6M_IDX; 3055 r = RATE_6M_IDX;
3056 else 3056 else
3057 r = RATE_1M_IDX; 3057 r = RATE_1M_IDX;
@@ -5553,6 +5553,7 @@ __il4965_up(struct il_priv *il)
5553 5553
5554 il4965_prepare_card_hw(il); 5554 il4965_prepare_card_hw(il);
5555 if (!il->hw_ready) { 5555 if (!il->hw_ready) {
5556 il_dealloc_bcast_stations(il);
5556 IL_ERR("HW not ready\n"); 5557 IL_ERR("HW not ready\n");
5557 return -EIO; 5558 return -EIO;
5558 } 5559 }
@@ -5564,6 +5565,7 @@ __il4965_up(struct il_priv *il)
5564 set_bit(S_RFKILL, &il->status); 5565 set_bit(S_RFKILL, &il->status);
5565 wiphy_rfkill_set_hw_state(il->hw->wiphy, true); 5566 wiphy_rfkill_set_hw_state(il->hw->wiphy, true);
5566 5567
5568 il_dealloc_bcast_stations(il);
5567 il_enable_rfkill_int(il); 5569 il_enable_rfkill_int(il);
5568 IL_WARN("Radio disabled by HW RF Kill switch\n"); 5570 IL_WARN("Radio disabled by HW RF Kill switch\n");
5569 return 0; 5571 return 0;
@@ -5577,6 +5579,7 @@ __il4965_up(struct il_priv *il)
5577 ret = il4965_hw_nic_init(il); 5579 ret = il4965_hw_nic_init(il);
5578 if (ret) { 5580 if (ret) {
5579 IL_ERR("Unable to init nic\n"); 5581 IL_ERR("Unable to init nic\n");
5582 il_dealloc_bcast_stations(il);
5580 return ret; 5583 return ret;
5581 } 5584 }
5582 5585
@@ -5787,12 +5790,12 @@ il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
5787 5790
5788 hw->max_listen_interval = IL_CONN_MAX_LISTEN_INTERVAL; 5791 hw->max_listen_interval = IL_CONN_MAX_LISTEN_INTERVAL;
5789 5792
5790 if (il->bands[IEEE80211_BAND_2GHZ].n_channels) 5793 if (il->bands[NL80211_BAND_2GHZ].n_channels)
5791 il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = 5794 il->hw->wiphy->bands[NL80211_BAND_2GHZ] =
5792 &il->bands[IEEE80211_BAND_2GHZ]; 5795 &il->bands[NL80211_BAND_2GHZ];
5793 if (il->bands[IEEE80211_BAND_5GHZ].n_channels) 5796 if (il->bands[NL80211_BAND_5GHZ].n_channels)
5794 il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = 5797 il->hw->wiphy->bands[NL80211_BAND_5GHZ] =
5795 &il->bands[IEEE80211_BAND_5GHZ]; 5798 &il->bands[NL80211_BAND_5GHZ];
5796 5799
5797 il_leds_init(il); 5800 il_leds_init(il);
5798 5801
@@ -6365,7 +6368,7 @@ il4965_init_drv(struct il_priv *il)
6365 6368
6366 il->ieee_channels = NULL; 6369 il->ieee_channels = NULL;
6367 il->ieee_rates = NULL; 6370 il->ieee_rates = NULL;
6368 il->band = IEEE80211_BAND_2GHZ; 6371 il->band = NL80211_BAND_2GHZ;
6369 6372
6370 il->iw_mode = NL80211_IFTYPE_STATION; 6373 il->iw_mode = NL80211_IFTYPE_STATION;
6371 il->current_ht_config.smps = IEEE80211_SMPS_STATIC; 6374 il->current_ht_config.smps = IEEE80211_SMPS_STATIC;
@@ -6477,7 +6480,7 @@ il4965_set_hw_params(struct il_priv *il)
6477 il->hw_params.max_data_size = IL49_RTC_DATA_SIZE; 6480 il->hw_params.max_data_size = IL49_RTC_DATA_SIZE;
6478 il->hw_params.max_inst_size = IL49_RTC_INST_SIZE; 6481 il->hw_params.max_inst_size = IL49_RTC_INST_SIZE;
6479 il->hw_params.max_bsm_size = BSM_SRAM_SIZE; 6482 il->hw_params.max_bsm_size = BSM_SRAM_SIZE;
6480 il->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ); 6483 il->hw_params.ht40_channel = BIT(NL80211_BAND_5GHZ);
6481 6484
6482 il->hw_params.rx_wrt_ptr_reg = FH49_RSCSR_CHNL0_WPTR; 6485 il->hw_params.rx_wrt_ptr_reg = FH49_RSCSR_CHNL0_WPTR;
6483 6486
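
The 4965-mac.c diff is not only the rename: the __il4965_up() hunks also add il_dealloc_bcast_stations() to the early-exit paths (hardware not ready, RF-kill asserted, NIC init failure), so broadcast-station state allocated earlier in the bring-up path is released when the function bails out early. The patch does this with a direct call before each return; an equivalent way to express the same unwind is a single error label, sketched below with hypothetical demo_* names:

struct demo_priv;                               /* hypothetical private data */

int demo_alloc_bcast(struct demo_priv *priv);   /* hypothetical */
void demo_free_bcast(struct demo_priv *priv);   /* hypothetical */
int demo_hw_init(struct demo_priv *priv);       /* hypothetical */

/* Same cleanup-on-error idea expressed with a goto unwind. */
static int demo_up(struct demo_priv *priv)
{
        int ret;

        ret = demo_alloc_bcast(priv);
        if (ret)
                return ret;

        ret = demo_hw_init(priv);
        if (ret)
                goto out_free_bcast;    /* undo the earlier allocation */

        return 0;

out_free_bcast:
        demo_free_bcast(priv);
        return ret;
}
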
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
index bac60b2bc3f0..a867ae7f4095 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
@@ -549,7 +549,7 @@ il4965_rate_n_flags_from_tbl(struct il_priv *il, struct il_scale_tbl_info *tbl,
549 */ 549 */
550static int 550static int
551il4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags, 551il4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
552 enum ieee80211_band band, 552 enum nl80211_band band,
553 struct il_scale_tbl_info *tbl, int *rate_idx) 553 struct il_scale_tbl_info *tbl, int *rate_idx)
554{ 554{
555 u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK); 555 u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
@@ -574,7 +574,7 @@ il4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
574 /* legacy rate format */ 574 /* legacy rate format */
575 if (!(rate_n_flags & RATE_MCS_HT_MSK)) { 575 if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
576 if (il4965_num_of_ant == 1) { 576 if (il4965_num_of_ant == 1) {
577 if (band == IEEE80211_BAND_5GHZ) 577 if (band == NL80211_BAND_5GHZ)
578 tbl->lq_type = LQ_A; 578 tbl->lq_type = LQ_A;
579 else 579 else
580 tbl->lq_type = LQ_G; 580 tbl->lq_type = LQ_G;
@@ -743,7 +743,7 @@ il4965_rs_get_lower_rate(struct il_lq_sta *lq_sta,
743 if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_idx)) { 743 if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_idx)) {
744 switch_to_legacy = 1; 744 switch_to_legacy = 1;
745 scale_idx = rs_ht_to_legacy[scale_idx]; 745 scale_idx = rs_ht_to_legacy[scale_idx];
746 if (lq_sta->band == IEEE80211_BAND_5GHZ) 746 if (lq_sta->band == NL80211_BAND_5GHZ)
747 tbl->lq_type = LQ_A; 747 tbl->lq_type = LQ_A;
748 else 748 else
749 tbl->lq_type = LQ_G; 749 tbl->lq_type = LQ_G;
@@ -762,7 +762,7 @@ il4965_rs_get_lower_rate(struct il_lq_sta *lq_sta,
762 /* Mask with station rate restriction */ 762 /* Mask with station rate restriction */
763 if (is_legacy(tbl->lq_type)) { 763 if (is_legacy(tbl->lq_type)) {
764 /* supp_rates has no CCK bits in A mode */ 764 /* supp_rates has no CCK bits in A mode */
765 if (lq_sta->band == IEEE80211_BAND_5GHZ) 765 if (lq_sta->band == NL80211_BAND_5GHZ)
766 rate_mask = 766 rate_mask =
767 (u16) (rate_mask & 767 (u16) (rate_mask &
768 (lq_sta->supp_rates << IL_FIRST_OFDM_RATE)); 768 (lq_sta->supp_rates << IL_FIRST_OFDM_RATE));
@@ -851,7 +851,7 @@ il4965_rs_tx_status(void *il_r, struct ieee80211_supported_band *sband,
851 table = &lq_sta->lq; 851 table = &lq_sta->lq;
852 tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags); 852 tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
853 il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band, &tbl_type, &rs_idx); 853 il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band, &tbl_type, &rs_idx);
854 if (il->band == IEEE80211_BAND_5GHZ) 854 if (il->band == NL80211_BAND_5GHZ)
855 rs_idx -= IL_FIRST_OFDM_RATE; 855 rs_idx -= IL_FIRST_OFDM_RATE;
856 mac_flags = info->status.rates[0].flags; 856 mac_flags = info->status.rates[0].flags;
857 mac_idx = info->status.rates[0].idx; 857 mac_idx = info->status.rates[0].idx;
@@ -864,7 +864,7 @@ il4965_rs_tx_status(void *il_r, struct ieee80211_supported_band *sband,
864 * mac80211 HT idx is always zero-idxed; we need to move 864 * mac80211 HT idx is always zero-idxed; we need to move
865 * HT OFDM rates after CCK rates in 2.4 GHz band 865 * HT OFDM rates after CCK rates in 2.4 GHz band
866 */ 866 */
867 if (il->band == IEEE80211_BAND_2GHZ) 867 if (il->band == NL80211_BAND_2GHZ)
868 mac_idx += IL_FIRST_OFDM_RATE; 868 mac_idx += IL_FIRST_OFDM_RATE;
869 } 869 }
870 /* Here we actually compare this rate to the latest LQ command */ 870 /* Here we actually compare this rate to the latest LQ command */
@@ -1816,7 +1816,7 @@ il4965_rs_rate_scale_perform(struct il_priv *il, struct sk_buff *skb,
1816 1816
1817 /* mask with station rate restriction */ 1817 /* mask with station rate restriction */
1818 if (is_legacy(tbl->lq_type)) { 1818 if (is_legacy(tbl->lq_type)) {
1819 if (lq_sta->band == IEEE80211_BAND_5GHZ) 1819 if (lq_sta->band == NL80211_BAND_5GHZ)
1820 /* supp_rates has no CCK bits in A mode */ 1820 /* supp_rates has no CCK bits in A mode */
1821 rate_scale_idx_msk = 1821 rate_scale_idx_msk =
1822 (u16) (rate_mask & 1822 (u16) (rate_mask &
@@ -2212,7 +2212,7 @@ il4965_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
2212 /* Get max rate if user set max rate */ 2212 /* Get max rate if user set max rate */
2213 if (lq_sta) { 2213 if (lq_sta) {
2214 lq_sta->max_rate_idx = txrc->max_rate_idx; 2214 lq_sta->max_rate_idx = txrc->max_rate_idx;
2215 if (sband->band == IEEE80211_BAND_5GHZ && 2215 if (sband->band == NL80211_BAND_5GHZ &&
2216 lq_sta->max_rate_idx != -1) 2216 lq_sta->max_rate_idx != -1)
2217 lq_sta->max_rate_idx += IL_FIRST_OFDM_RATE; 2217 lq_sta->max_rate_idx += IL_FIRST_OFDM_RATE;
2218 if (lq_sta->max_rate_idx < 0 || 2218 if (lq_sta->max_rate_idx < 0 ||
@@ -2258,11 +2258,11 @@ il4965_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
2258 } else { 2258 } else {
2259 /* Check for invalid rates */ 2259 /* Check for invalid rates */
2260 if (rate_idx < 0 || rate_idx >= RATE_COUNT_LEGACY || 2260 if (rate_idx < 0 || rate_idx >= RATE_COUNT_LEGACY ||
2261 (sband->band == IEEE80211_BAND_5GHZ && 2261 (sband->band == NL80211_BAND_5GHZ &&
2262 rate_idx < IL_FIRST_OFDM_RATE)) 2262 rate_idx < IL_FIRST_OFDM_RATE))
2263 rate_idx = rate_lowest_index(sband, sta); 2263 rate_idx = rate_lowest_index(sband, sta);
2264 /* On valid 5 GHz rate, adjust idx */ 2264 /* On valid 5 GHz rate, adjust idx */
2265 else if (sband->band == IEEE80211_BAND_5GHZ) 2265 else if (sband->band == NL80211_BAND_5GHZ)
2266 rate_idx -= IL_FIRST_OFDM_RATE; 2266 rate_idx -= IL_FIRST_OFDM_RATE;
2267 info->control.rates[0].flags = 0; 2267 info->control.rates[0].flags = 0;
2268 } 2268 }
@@ -2362,7 +2362,7 @@ il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
2362 2362
2363 /* Set last_txrate_idx to lowest rate */ 2363 /* Set last_txrate_idx to lowest rate */
2364 lq_sta->last_txrate_idx = rate_lowest_index(sband, sta); 2364 lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
2365 if (sband->band == IEEE80211_BAND_5GHZ) 2365 if (sband->band == NL80211_BAND_5GHZ)
2366 lq_sta->last_txrate_idx += IL_FIRST_OFDM_RATE; 2366 lq_sta->last_txrate_idx += IL_FIRST_OFDM_RATE;
2367 lq_sta->is_agg = 0; 2367 lq_sta->is_agg = 0;
2368 2368
diff --git a/drivers/net/wireless/intel/iwlegacy/4965.c b/drivers/net/wireless/intel/iwlegacy/4965.c
index fe47db9c20cd..c3c638ed0ed7 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965.c
@@ -1267,7 +1267,7 @@ il4965_send_tx_power(struct il_priv *il)
1267 "TX Power requested while scanning!\n")) 1267 "TX Power requested while scanning!\n"))
1268 return -EAGAIN; 1268 return -EAGAIN;
1269 1269
1270 band = il->band == IEEE80211_BAND_2GHZ; 1270 band = il->band == NL80211_BAND_2GHZ;
1271 1271
1272 is_ht40 = iw4965_is_ht40_channel(il->active.flags); 1272 is_ht40 = iw4965_is_ht40_channel(il->active.flags);
1273 1273
@@ -1480,7 +1480,7 @@ il4965_hw_channel_switch(struct il_priv *il,
1480 u8 switch_count; 1480 u8 switch_count;
1481 u16 beacon_interval = le16_to_cpu(il->timing.beacon_interval); 1481 u16 beacon_interval = le16_to_cpu(il->timing.beacon_interval);
1482 struct ieee80211_vif *vif = il->vif; 1482 struct ieee80211_vif *vif = il->vif;
1483 band = (il->band == IEEE80211_BAND_2GHZ); 1483 band = (il->band == NL80211_BAND_2GHZ);
1484 1484
1485 if (WARN_ON_ONCE(vif == NULL)) 1485 if (WARN_ON_ONCE(vif == NULL))
1486 return -EIO; 1486 return -EIO;
@@ -1918,7 +1918,7 @@ struct il_cfg il4965_cfg = {
1918 * Force use of chains B and C for scan RX on 5 GHz band 1918 * Force use of chains B and C for scan RX on 5 GHz band
1919 * because the device has off-channel reception on chain A. 1919 * because the device has off-channel reception on chain A.
1920 */ 1920 */
1921 .scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC, 1921 .scan_rx_antennas[NL80211_BAND_5GHZ] = ANT_BC,
1922 1922
1923 .eeprom_size = IL4965_EEPROM_IMG_SIZE, 1923 .eeprom_size = IL4965_EEPROM_IMG_SIZE,
1924 .num_of_queues = IL49_NUM_QUEUES, 1924 .num_of_queues = IL49_NUM_QUEUES,
diff --git a/drivers/net/wireless/intel/iwlegacy/4965.h b/drivers/net/wireless/intel/iwlegacy/4965.h
index e432715e02d8..527e8b531aed 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965.h
+++ b/drivers/net/wireless/intel/iwlegacy/4965.h
@@ -68,7 +68,7 @@ void il4965_rx_replenish(struct il_priv *il);
68void il4965_rx_replenish_now(struct il_priv *il); 68void il4965_rx_replenish_now(struct il_priv *il);
69void il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq); 69void il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq);
70int il4965_rxq_stop(struct il_priv *il); 70int il4965_rxq_stop(struct il_priv *il);
71int il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band); 71int il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band);
72void il4965_rx_handle(struct il_priv *il); 72void il4965_rx_handle(struct il_priv *il);
73 73
74/* tx */ 74/* tx */
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
index eb5cb603bc52..eb24b9241bb2 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.c
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -723,10 +723,9 @@ il_eeprom_init(struct il_priv *il)
723 sz = il->cfg->eeprom_size; 723 sz = il->cfg->eeprom_size;
724 D_EEPROM("NVM size = %d\n", sz); 724 D_EEPROM("NVM size = %d\n", sz);
725 il->eeprom = kzalloc(sz, GFP_KERNEL); 725 il->eeprom = kzalloc(sz, GFP_KERNEL);
726 if (!il->eeprom) { 726 if (!il->eeprom)
727 ret = -ENOMEM; 727 return -ENOMEM;
728 goto alloc_err; 728
729 }
730 e = (__le16 *) il->eeprom; 729 e = (__le16 *) il->eeprom;
731 730
732 il->ops->apm_init(il); 731 il->ops->apm_init(il);
@@ -778,7 +777,6 @@ err:
778 il_eeprom_free(il); 777 il_eeprom_free(il);
779 /* Reset chip to save power until we load uCode during "up". */ 778 /* Reset chip to save power until we load uCode during "up". */
780 il_apm_stop(il); 779 il_apm_stop(il);
781alloc_err:
782 return ret; 780 return ret;
783} 781}
784EXPORT_SYMBOL(il_eeprom_init); 782EXPORT_SYMBOL(il_eeprom_init);
@@ -862,7 +860,7 @@ il_init_band_reference(const struct il_priv *il, int eep_band,
862 * Does not set up a command, or touch hardware. 860 * Does not set up a command, or touch hardware.
863 */ 861 */
864static int 862static int
865il_mod_ht40_chan_info(struct il_priv *il, enum ieee80211_band band, u16 channel, 863il_mod_ht40_chan_info(struct il_priv *il, enum nl80211_band band, u16 channel,
866 const struct il_eeprom_channel *eeprom_ch, 864 const struct il_eeprom_channel *eeprom_ch,
867 u8 clear_ht40_extension_channel) 865 u8 clear_ht40_extension_channel)
868{ 866{
@@ -947,7 +945,7 @@ il_init_channel_map(struct il_priv *il)
947 ch_info->channel = eeprom_ch_idx[ch]; 945 ch_info->channel = eeprom_ch_idx[ch];
948 ch_info->band = 946 ch_info->band =
949 (band == 947 (band ==
950 1) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; 948 1) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
951 949
952 /* permanently store EEPROM's channel regulatory flags 950 /* permanently store EEPROM's channel regulatory flags
953 * and max power in channel info database. */ 951 * and max power in channel info database. */
@@ -1005,14 +1003,14 @@ il_init_channel_map(struct il_priv *il)
1005 1003
1006 /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */ 1004 /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
1007 for (band = 6; band <= 7; band++) { 1005 for (band = 6; band <= 7; band++) {
1008 enum ieee80211_band ieeeband; 1006 enum nl80211_band ieeeband;
1009 1007
1010 il_init_band_reference(il, band, &eeprom_ch_count, 1008 il_init_band_reference(il, band, &eeprom_ch_count,
1011 &eeprom_ch_info, &eeprom_ch_idx); 1009 &eeprom_ch_info, &eeprom_ch_idx);
1012 1010
1013 /* EEPROM band 6 is 2.4, band 7 is 5 GHz */ 1011 /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
1014 ieeeband = 1012 ieeeband =
1015 (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; 1013 (band == 6) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
1016 1014
1017 /* Loop through each band adding each of the channels */ 1015 /* Loop through each band adding each of the channels */
1018 for (ch = 0; ch < eeprom_ch_count; ch++) { 1016 for (ch = 0; ch < eeprom_ch_count; ch++) {
@@ -1050,19 +1048,19 @@ EXPORT_SYMBOL(il_free_channel_map);
1050 * Based on band and channel number. 1048 * Based on band and channel number.
1051 */ 1049 */
1052const struct il_channel_info * 1050const struct il_channel_info *
1053il_get_channel_info(const struct il_priv *il, enum ieee80211_band band, 1051il_get_channel_info(const struct il_priv *il, enum nl80211_band band,
1054 u16 channel) 1052 u16 channel)
1055{ 1053{
1056 int i; 1054 int i;
1057 1055
1058 switch (band) { 1056 switch (band) {
1059 case IEEE80211_BAND_5GHZ: 1057 case NL80211_BAND_5GHZ:
1060 for (i = 14; i < il->channel_count; i++) { 1058 for (i = 14; i < il->channel_count; i++) {
1061 if (il->channel_info[i].channel == channel) 1059 if (il->channel_info[i].channel == channel)
1062 return &il->channel_info[i]; 1060 return &il->channel_info[i];
1063 } 1061 }
1064 break; 1062 break;
1065 case IEEE80211_BAND_2GHZ: 1063 case NL80211_BAND_2GHZ:
1066 if (channel >= 1 && channel <= 14) 1064 if (channel >= 1 && channel <= 14)
1067 return &il->channel_info[channel - 1]; 1065 return &il->channel_info[channel - 1];
1068 break; 1066 break;
@@ -1459,7 +1457,7 @@ il_hdl_scan_complete(struct il_priv *il, struct il_rx_buf *rxb)
1459 clear_bit(S_SCAN_HW, &il->status); 1457 clear_bit(S_SCAN_HW, &il->status);
1460 1458
1461 D_SCAN("Scan on %sGHz took %dms\n", 1459 D_SCAN("Scan on %sGHz took %dms\n",
1462 (il->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2", 1460 (il->scan_band == NL80211_BAND_2GHZ) ? "2.4" : "5.2",
1463 jiffies_to_msecs(jiffies - il->scan_start)); 1461 jiffies_to_msecs(jiffies - il->scan_start));
1464 1462
1465 queue_work(il->workqueue, &il->scan_completed); 1463 queue_work(il->workqueue, &il->scan_completed);
@@ -1477,10 +1475,10 @@ il_setup_rx_scan_handlers(struct il_priv *il)
1477EXPORT_SYMBOL(il_setup_rx_scan_handlers); 1475EXPORT_SYMBOL(il_setup_rx_scan_handlers);
1478 1476
1479u16 1477u16
1480il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band, 1478il_get_active_dwell_time(struct il_priv *il, enum nl80211_band band,
1481 u8 n_probes) 1479 u8 n_probes)
1482{ 1480{
1483 if (band == IEEE80211_BAND_5GHZ) 1481 if (band == NL80211_BAND_5GHZ)
1484 return IL_ACTIVE_DWELL_TIME_52 + 1482 return IL_ACTIVE_DWELL_TIME_52 +
1485 IL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1); 1483 IL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
1486 else 1484 else
@@ -1490,14 +1488,14 @@ il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band,
1490EXPORT_SYMBOL(il_get_active_dwell_time); 1488EXPORT_SYMBOL(il_get_active_dwell_time);
1491 1489
1492u16 1490u16
1493il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band, 1491il_get_passive_dwell_time(struct il_priv *il, enum nl80211_band band,
1494 struct ieee80211_vif *vif) 1492 struct ieee80211_vif *vif)
1495{ 1493{
1496 u16 value; 1494 u16 value;
1497 1495
1498 u16 passive = 1496 u16 passive =
1499 (band == 1497 (band ==
1500 IEEE80211_BAND_2GHZ) ? IL_PASSIVE_DWELL_BASE + 1498 NL80211_BAND_2GHZ) ? IL_PASSIVE_DWELL_BASE +
1501 IL_PASSIVE_DWELL_TIME_24 : IL_PASSIVE_DWELL_BASE + 1499 IL_PASSIVE_DWELL_TIME_24 : IL_PASSIVE_DWELL_BASE +
1502 IL_PASSIVE_DWELL_TIME_52; 1500 IL_PASSIVE_DWELL_TIME_52;
1503 1501
@@ -1522,10 +1520,10 @@ void
1522il_init_scan_params(struct il_priv *il) 1520il_init_scan_params(struct il_priv *il)
1523{ 1521{
1524 u8 ant_idx = fls(il->hw_params.valid_tx_ant) - 1; 1522 u8 ant_idx = fls(il->hw_params.valid_tx_ant) - 1;
1525 if (!il->scan_tx_ant[IEEE80211_BAND_5GHZ]) 1523 if (!il->scan_tx_ant[NL80211_BAND_5GHZ])
1526 il->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx; 1524 il->scan_tx_ant[NL80211_BAND_5GHZ] = ant_idx;
1527 if (!il->scan_tx_ant[IEEE80211_BAND_2GHZ]) 1525 if (!il->scan_tx_ant[NL80211_BAND_2GHZ])
1528 il->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx; 1526 il->scan_tx_ant[NL80211_BAND_2GHZ] = ant_idx;
1529} 1527}
1530EXPORT_SYMBOL(il_init_scan_params); 1528EXPORT_SYMBOL(il_init_scan_params);
1531 1529
@@ -2005,7 +2003,7 @@ il_prep_station(struct il_priv *il, const u8 *addr, bool is_ap,
2005 il_set_ht_add_station(il, sta_id, sta); 2003 il_set_ht_add_station(il, sta_id, sta);
2006 2004
2007 /* 3945 only */ 2005 /* 3945 only */
2008 rate = (il->band == IEEE80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP; 2006 rate = (il->band == NL80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP;
2009 /* Turn on both antennas for the station... */ 2007 /* Turn on both antennas for the station... */
2010 station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK); 2008 station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
2011 2009
@@ -2794,8 +2792,10 @@ il_tx_queue_free(struct il_priv *il, int txq_id)
2794 il_tx_queue_unmap(il, txq_id); 2792 il_tx_queue_unmap(il, txq_id);
2795 2793
2796 /* De-alloc array of command/tx buffers */ 2794 /* De-alloc array of command/tx buffers */
2797 for (i = 0; i < TFD_TX_CMD_SLOTS; i++) 2795 if (txq->cmd) {
2798 kfree(txq->cmd[i]); 2796 for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
2797 kfree(txq->cmd[i]);
2798 }
2799 2799
2800 /* De-alloc circular buffer of TFDs */ 2800 /* De-alloc circular buffer of TFDs */
2801 if (txq->q.n_bd) 2801 if (txq->q.n_bd)
@@ -2873,8 +2873,10 @@ il_cmd_queue_free(struct il_priv *il)
2873 il_cmd_queue_unmap(il); 2873 il_cmd_queue_unmap(il);
2874 2874
2875 /* De-alloc array of command/tx buffers */ 2875 /* De-alloc array of command/tx buffers */
2876 for (i = 0; i <= TFD_CMD_SLOTS; i++) 2876 if (txq->cmd) {
2877 kfree(txq->cmd[i]); 2877 for (i = 0; i <= TFD_CMD_SLOTS; i++)
2878 kfree(txq->cmd[i]);
2879 }
2878 2880
2879 /* De-alloc circular buffer of TFDs */ 2881 /* De-alloc circular buffer of TFDs */
2880 if (txq->q.n_bd) 2882 if (txq->q.n_bd)
@@ -3080,7 +3082,9 @@ err:
3080 kfree(txq->cmd[i]); 3082 kfree(txq->cmd[i]);
3081out_free_arrays: 3083out_free_arrays:
3082 kfree(txq->meta); 3084 kfree(txq->meta);
3085 txq->meta = NULL;
3083 kfree(txq->cmd); 3086 kfree(txq->cmd);
3087 txq->cmd = NULL;
3084 3088
3085 return -ENOMEM; 3089 return -ENOMEM;
3086} 3090}
@@ -3378,7 +3382,7 @@ EXPORT_SYMBOL(il_bcast_addr);
3378static void 3382static void
3379il_init_ht_hw_capab(const struct il_priv *il, 3383il_init_ht_hw_capab(const struct il_priv *il,
3380 struct ieee80211_sta_ht_cap *ht_info, 3384 struct ieee80211_sta_ht_cap *ht_info,
3381 enum ieee80211_band band) 3385 enum nl80211_band band)
3382{ 3386{
3383 u16 max_bit_rate = 0; 3387 u16 max_bit_rate = 0;
3384 u8 rx_chains_num = il->hw_params.rx_chains_num; 3388 u8 rx_chains_num = il->hw_params.rx_chains_num;
@@ -3439,8 +3443,8 @@ il_init_geos(struct il_priv *il)
3439 int i = 0; 3443 int i = 0;
3440 s8 max_tx_power = 0; 3444 s8 max_tx_power = 0;
3441 3445
3442 if (il->bands[IEEE80211_BAND_2GHZ].n_bitrates || 3446 if (il->bands[NL80211_BAND_2GHZ].n_bitrates ||
3443 il->bands[IEEE80211_BAND_5GHZ].n_bitrates) { 3447 il->bands[NL80211_BAND_5GHZ].n_bitrates) {
3444 D_INFO("Geography modes already initialized.\n"); 3448 D_INFO("Geography modes already initialized.\n");
3445 set_bit(S_GEO_CONFIGURED, &il->status); 3449 set_bit(S_GEO_CONFIGURED, &il->status);
3446 return 0; 3450 return 0;
@@ -3461,23 +3465,23 @@ il_init_geos(struct il_priv *il)
3461 } 3465 }
3462 3466
3463 /* 5.2GHz channels start after the 2.4GHz channels */ 3467 /* 5.2GHz channels start after the 2.4GHz channels */
3464 sband = &il->bands[IEEE80211_BAND_5GHZ]; 3468 sband = &il->bands[NL80211_BAND_5GHZ];
3465 sband->channels = &channels[ARRAY_SIZE(il_eeprom_band_1)]; 3469 sband->channels = &channels[ARRAY_SIZE(il_eeprom_band_1)];
3466 /* just OFDM */ 3470 /* just OFDM */
3467 sband->bitrates = &rates[IL_FIRST_OFDM_RATE]; 3471 sband->bitrates = &rates[IL_FIRST_OFDM_RATE];
 sband->n_bitrates = RATE_COUNT_LEGACY - IL_FIRST_OFDM_RATE;
 
 if (il->cfg->sku & IL_SKU_N)
-il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_5GHZ);
+il_init_ht_hw_capab(il, &sband->ht_cap, NL80211_BAND_5GHZ);
 
-sband = &il->bands[IEEE80211_BAND_2GHZ];
+sband = &il->bands[NL80211_BAND_2GHZ];
 sband->channels = channels;
 /* OFDM & CCK */
 sband->bitrates = rates;
 sband->n_bitrates = RATE_COUNT_LEGACY;
 
 if (il->cfg->sku & IL_SKU_N)
-il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_2GHZ);
+il_init_ht_hw_capab(il, &sband->ht_cap, NL80211_BAND_2GHZ);
 
 il->ieee_channels = channels;
 il->ieee_rates = rates;
@@ -3528,7 +3532,7 @@ il_init_geos(struct il_priv *il)
 il->tx_power_user_lmt = max_tx_power;
 il->tx_power_next = max_tx_power;
 
-if (il->bands[IEEE80211_BAND_5GHZ].n_channels == 0 &&
+if (il->bands[NL80211_BAND_5GHZ].n_channels == 0 &&
 (il->cfg->sku & IL_SKU_A)) {
 IL_INFO("Incorrectly detected BG card as ABG. "
 "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
@@ -3537,8 +3541,8 @@ il_init_geos(struct il_priv *il)
 }
 
 IL_INFO("Tunable channels: %d 802.11bg, %d 802.11a channels\n",
-il->bands[IEEE80211_BAND_2GHZ].n_channels,
-il->bands[IEEE80211_BAND_5GHZ].n_channels);
+il->bands[NL80211_BAND_2GHZ].n_channels,
+il->bands[NL80211_BAND_5GHZ].n_channels);
 
 set_bit(S_GEO_CONFIGURED, &il->status);
 
@@ -3559,7 +3563,7 @@ il_free_geos(struct il_priv *il)
 EXPORT_SYMBOL(il_free_geos);
 
 static bool
-il_is_channel_extension(struct il_priv *il, enum ieee80211_band band,
+il_is_channel_extension(struct il_priv *il, enum nl80211_band band,
 u16 channel, u8 extension_chan_offset)
 {
 const struct il_channel_info *ch_info;
@@ -3922,14 +3926,14 @@ EXPORT_SYMBOL(il_set_rxon_ht);
 
 /* Return valid, unused, channel for a passive scan to reset the RF */
 u8
-il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band)
+il_get_single_channel_number(struct il_priv *il, enum nl80211_band band)
 {
 const struct il_channel_info *ch_info;
 int i;
 u8 channel = 0;
 u8 min, max;
 
-if (band == IEEE80211_BAND_5GHZ) {
+if (band == NL80211_BAND_5GHZ) {
 min = 14;
 max = il->channel_count;
 } else {
@@ -3961,14 +3965,14 @@ EXPORT_SYMBOL(il_get_single_channel_number);
 int
 il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch)
 {
-enum ieee80211_band band = ch->band;
+enum nl80211_band band = ch->band;
 u16 channel = ch->hw_value;
 
 if (le16_to_cpu(il->staging.channel) == channel && il->band == band)
 return 0;
 
 il->staging.channel = cpu_to_le16(channel);
-if (band == IEEE80211_BAND_5GHZ)
+if (band == NL80211_BAND_5GHZ)
 il->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
 else
 il->staging.flags |= RXON_FLG_BAND_24G_MSK;
@@ -3982,10 +3986,10 @@ il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch)
 EXPORT_SYMBOL(il_set_rxon_channel);
 
 void
-il_set_flags_for_band(struct il_priv *il, enum ieee80211_band band,
+il_set_flags_for_band(struct il_priv *il, enum nl80211_band band,
 struct ieee80211_vif *vif)
 {
-if (band == IEEE80211_BAND_5GHZ) {
+if (band == NL80211_BAND_5GHZ) {
 il->staging.flags &=
 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK |
 RXON_FLG_CCK_MSK);
@@ -5411,7 +5415,7 @@ il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 
 if (changes & BSS_CHANGED_ERP_CTS_PROT) {
 D_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot);
-if (bss_conf->use_cts_prot && il->band != IEEE80211_BAND_5GHZ)
+if (bss_conf->use_cts_prot && il->band != NL80211_BAND_5GHZ)
 il->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
 else
 il->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
diff --git a/drivers/net/wireless/intel/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h
index ce52cf114fde..726ede391cb9 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.h
+++ b/drivers/net/wireless/intel/iwlegacy/common.h
@@ -432,7 +432,7 @@ u16 il_eeprom_query16(const struct il_priv *il, size_t offset);
 int il_init_channel_map(struct il_priv *il);
 void il_free_channel_map(struct il_priv *il);
 const struct il_channel_info *il_get_channel_info(const struct il_priv *il,
-enum ieee80211_band band,
+enum nl80211_band band,
 u16 channel);
 
 #define IL_NUM_SCAN_RATES (2)
@@ -497,7 +497,7 @@ struct il_channel_info {
 
 u8 group_idx; /* 0-4, maps channel to group1/2/3/4/5 */
 u8 band_idx; /* 0-4, maps channel to band1/2/3/4/5 */
-enum ieee80211_band band;
+enum nl80211_band band;
 
 /* HT40 channel info */
 s8 ht40_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
@@ -811,7 +811,7 @@ struct il_sensitivity_ranges {
 * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
 * @max_stations:
 * @ht40_channel: is 40MHz width possible in band 2.4
-* BIT(IEEE80211_BAND_5GHZ) BIT(IEEE80211_BAND_5GHZ)
+* BIT(NL80211_BAND_5GHZ) BIT(NL80211_BAND_5GHZ)
 * @sw_crypto: 0 for hw, 1 for sw
 * @max_xxx_size: for ucode uses
 * @ct_kill_threshold: temperature threshold
@@ -1141,13 +1141,13 @@ struct il_priv {
 struct list_head free_frames;
 int frames_count;
 
-enum ieee80211_band band;
+enum nl80211_band band;
 int alloc_rxb_page;
 
 void (*handlers[IL_CN_MAX]) (struct il_priv *il,
 struct il_rx_buf *rxb);
 
-struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
+struct ieee80211_supported_band bands[NUM_NL80211_BANDS];
 
 /* spectrum measurement report caching */
 struct il_spectrum_notification measure_report;
@@ -1176,10 +1176,10 @@ struct il_priv {
 unsigned long scan_start;
 unsigned long scan_start_tsf;
 void *scan_cmd;
-enum ieee80211_band scan_band;
+enum nl80211_band scan_band;
 struct cfg80211_scan_request *scan_request;
 struct ieee80211_vif *scan_vif;
-u8 scan_tx_ant[IEEE80211_NUM_BANDS];
+u8 scan_tx_ant[NUM_NL80211_BANDS];
 u8 mgmt_tx_ant;
 
 /* spinlock */
@@ -1479,7 +1479,7 @@ il_is_channel_radar(const struct il_channel_info *ch_info)
 static inline u8
 il_is_channel_a_band(const struct il_channel_info *ch_info)
 {
-return ch_info->band == IEEE80211_BAND_5GHZ;
+return ch_info->band == NL80211_BAND_5GHZ;
 }
 
 static inline int
@@ -1673,7 +1673,7 @@ struct il_cfg {
 /* params not likely to change within a device family */
 struct il_base_params *base_params;
 /* params likely to change within a device family */
-u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
+u8 scan_rx_antennas[NUM_NL80211_BANDS];
 enum il_led_mode led_mode;
 
 int eeprom_size;
@@ -1707,9 +1707,9 @@ void il_set_rxon_hwcrypto(struct il_priv *il, int hw_decrypt);
 int il_check_rxon_cmd(struct il_priv *il);
 int il_full_rxon_required(struct il_priv *il);
 int il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch);
-void il_set_flags_for_band(struct il_priv *il, enum ieee80211_band band,
+void il_set_flags_for_band(struct il_priv *il, enum nl80211_band band,
 struct ieee80211_vif *vif);
-u8 il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band);
+u8 il_get_single_channel_number(struct il_priv *il, enum nl80211_band band);
 void il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf);
 bool il_is_ht40_tx_allowed(struct il_priv *il,
 struct ieee80211_sta_ht_cap *ht_cap);
@@ -1793,9 +1793,9 @@ int il_force_reset(struct il_priv *il, bool external);
 u16 il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
 const u8 *ta, const u8 *ie, int ie_len, int left);
 void il_setup_rx_scan_handlers(struct il_priv *il);
-u16 il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band,
+u16 il_get_active_dwell_time(struct il_priv *il, enum nl80211_band band,
 u8 n_probes);
-u16 il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band,
+u16 il_get_passive_dwell_time(struct il_priv *il, enum nl80211_band band,
 struct ieee80211_vif *vif);
 void il_setup_scan_deferred_work(struct il_priv *il);
 void il_cancel_scan_deferred_work(struct il_priv *il);
@@ -1955,7 +1955,7 @@ il_commit_rxon(struct il_priv *il)
 }
 
 static inline const struct ieee80211_supported_band *
-il_get_hw_mode(struct il_priv *il, enum ieee80211_band band)
+il_get_hw_mode(struct il_priv *il, enum nl80211_band band)
 {
 return il->hw->wiphy->bands[band];
 }
@@ -2813,7 +2813,7 @@ struct il_lq_sta {
 u8 action_counter; /* # mode-switch actions tried */
 u8 is_green;
 u8 is_dup;
-enum ieee80211_band band;
+enum nl80211_band band;
 
 /* The following are bitmaps of rates; RATE_6M_MASK, etc. */
 u32 supp_rates;
diff --git a/drivers/net/wireless/intel/iwlegacy/debug.c b/drivers/net/wireless/intel/iwlegacy/debug.c
index 908b9f4fef6f..6fc6b7ff9849 100644
--- a/drivers/net/wireless/intel/iwlegacy/debug.c
+++ b/drivers/net/wireless/intel/iwlegacy/debug.c
@@ -544,7 +544,7 @@ il_dbgfs_channels_read(struct file *file, char __user *user_buf, size_t count,
 return -ENOMEM;
 }
 
-supp_band = il_get_hw_mode(il, IEEE80211_BAND_2GHZ);
+supp_band = il_get_hw_mode(il, NL80211_BAND_2GHZ);
 if (supp_band) {
 channels = supp_band->channels;
 
@@ -571,7 +571,7 @@ il_dbgfs_channels_read(struct file *file, char __user *user_buf, size_t count,
 flags & IEEE80211_CHAN_NO_IR ?
 "passive only" : "active/passive");
 }
-supp_band = il_get_hw_mode(il, IEEE80211_BAND_5GHZ);
+supp_band = il_get_hw_mode(il, NL80211_BAND_5GHZ);
 if (supp_band) {
 channels = supp_band->channels;
 
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
index 9de277c6c420..b79e38734f2f 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
@@ -158,7 +158,7 @@ void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
 struct iwl_rxon_context *ctx);
 void iwl_set_flags_for_band(struct iwl_priv *priv,
 struct iwl_rxon_context *ctx,
-enum ieee80211_band band,
+enum nl80211_band band,
 struct ieee80211_vif *vif);
 
 /* uCode */
@@ -186,7 +186,7 @@ int iwl_send_statistics_request(struct iwl_priv *priv,
 u8 flags, bool clear);
 
 static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
-struct iwl_priv *priv, enum ieee80211_band band)
+struct iwl_priv *priv, enum nl80211_band band)
 {
 return priv->hw->wiphy->bands[band];
 }
@@ -198,7 +198,7 @@ int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan);
 #endif
 
 /* rx */
-int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
+int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band);
 void iwl_setup_rx_handlers(struct iwl_priv *priv);
 void iwl_chswitch_done(struct iwl_priv *priv, bool is_success);
 
@@ -258,7 +258,7 @@ void iwl_cancel_scan_deferred_work(struct iwl_priv *priv);
 int __must_check iwl_scan_initiate(struct iwl_priv *priv,
 struct ieee80211_vif *vif,
 enum iwl_scan_type scan_type,
-enum ieee80211_band band);
+enum nl80211_band band);
 
 /* For faster active scanning, scan will move to the next channel if fewer than
 * PLCP_QUIET_THRESH packets are heard on this channel within
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
index 74c51615244e..f6591c83d636 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
@@ -335,7 +335,7 @@ static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
 if (!buf)
 return -ENOMEM;
 
-supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ);
+supp_band = iwl_get_hw_mode(priv, NL80211_BAND_2GHZ);
 if (supp_band) {
 channels = supp_band->channels;
 
@@ -358,7 +358,7 @@ static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
 IEEE80211_CHAN_NO_IR ?
 "passive only" : "active/passive");
 }
-supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ);
+supp_band = iwl_get_hw_mode(priv, NL80211_BAND_5GHZ);
 if (supp_band) {
 channels = supp_band->channels;
 
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
index 1a7ead753eee..8148df61a916 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
@@ -677,7 +677,7 @@ struct iwl_priv {
 
 struct iwl_hw_params hw_params;
 
-enum ieee80211_band band;
+enum nl80211_band band;
 u8 valid_contexts;
 
 void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
@@ -722,11 +722,11 @@ struct iwl_priv {
 unsigned long scan_start;
 unsigned long scan_start_tsf;
 void *scan_cmd;
-enum ieee80211_band scan_band;
+enum nl80211_band scan_band;
 struct cfg80211_scan_request *scan_request;
 struct ieee80211_vif *scan_vif;
 enum iwl_scan_type scan_type;
-u8 scan_tx_ant[IEEE80211_NUM_BANDS];
+u8 scan_tx_ant[NUM_NL80211_BANDS];
 u8 mgmt_tx_ant;
 
 /* max number of station keys */
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
index cc13c04063a5..f21732ec3b25 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
@@ -420,7 +420,7 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
 .data = { &cmd, },
 };
 
-cmd.band = priv->band == IEEE80211_BAND_2GHZ;
+cmd.band = priv->band == NL80211_BAND_2GHZ;
 ch = ch_switch->chandef.chan->hw_value;
 IWL_DEBUG_11H(priv, "channel switch from %d to %d\n",
 ctx->active.channel, ch);
@@ -588,7 +588,7 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
 
 hcmd.data[0] = cmd;
 
-cmd->band = priv->band == IEEE80211_BAND_2GHZ;
+cmd->band = priv->band == NL80211_BAND_2GHZ;
 ch = ch_switch->chandef.chan->hw_value;
 IWL_DEBUG_11H(priv, "channel switch from %u to %u\n",
 ctx->active.channel, ch);
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
index 1799469268ea..8dda52ae3bb5 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
@@ -94,7 +94,7 @@ void iwlagn_temperature(struct iwl_priv *priv)
 iwl_tt_handler(priv);
 }
 
-int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
+int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band)
 {
 int idx = 0;
 int band_offset = 0;
@@ -105,7 +105,7 @@ int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
 return idx;
 /* Legacy rate format, search for match in table */
 } else {
-if (band == IEEE80211_BAND_5GHZ)
+if (band == NL80211_BAND_5GHZ)
 band_offset = IWL_FIRST_OFDM_RATE;
 for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
 if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
@@ -878,7 +878,7 @@ u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
 int i;
 u8 ind = ant;
 
-if (priv->band == IEEE80211_BAND_2GHZ &&
+if (priv->band == NL80211_BAND_2GHZ &&
 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)
 return 0;
 
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
index c63ea79571ff..8c0719468d00 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
@@ -202,12 +202,12 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
 
 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
 
-if (priv->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels)
-priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
-&priv->nvm_data->bands[IEEE80211_BAND_2GHZ];
-if (priv->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels)
-priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
-&priv->nvm_data->bands[IEEE80211_BAND_5GHZ];
+if (priv->nvm_data->bands[NL80211_BAND_2GHZ].n_channels)
+priv->hw->wiphy->bands[NL80211_BAND_2GHZ] =
+&priv->nvm_data->bands[NL80211_BAND_2GHZ];
+if (priv->nvm_data->bands[NL80211_BAND_5GHZ].n_channels)
+priv->hw->wiphy->bands[NL80211_BAND_5GHZ] =
+&priv->nvm_data->bands[NL80211_BAND_5GHZ];
 
 hw->wiphy->hw_version = priv->trans->hw_id;
 
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
index 614716251c39..37b32a6f60fd 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
@@ -262,7 +262,7 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
 rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
 
 /* In mac80211, rates for 5 GHz start at 0 */
-if (info->band == IEEE80211_BAND_5GHZ)
+if (info->band == NL80211_BAND_5GHZ)
 rate += IWL_FIRST_OFDM_RATE;
 else if (rate >= IWL_FIRST_CCK_RATE && rate <= IWL_LAST_CCK_RATE)
 rate_flags |= RATE_MCS_CCK_MSK;
@@ -1117,7 +1117,7 @@ static int iwl_init_drv(struct iwl_priv *priv)
 
 INIT_LIST_HEAD(&priv->calib_results);
 
-priv->band = IEEE80211_BAND_2GHZ;
+priv->band = NL80211_BAND_2GHZ;
 
 priv->plcp_delta_threshold = priv->lib->plcp_delta_threshold;
 
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
index ee7505537c96..b95c2d76db33 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
@@ -599,7 +599,7 @@ static u32 rate_n_flags_from_tbl(struct iwl_priv *priv,
 * fill "search" or "active" tx mode table.
 */
 static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
-enum ieee80211_band band,
+enum nl80211_band band,
 struct iwl_scale_tbl_info *tbl,
 int *rate_idx)
 {
@@ -624,7 +624,7 @@ static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
 /* legacy rate format */
 if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
 if (num_of_ant == 1) {
-if (band == IEEE80211_BAND_5GHZ)
+if (band == NL80211_BAND_5GHZ)
 tbl->lq_type = LQ_A;
 else
 tbl->lq_type = LQ_G;
@@ -802,7 +802,7 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
 if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) {
 switch_to_legacy = 1;
 scale_index = rs_ht_to_legacy[scale_index];
-if (lq_sta->band == IEEE80211_BAND_5GHZ)
+if (lq_sta->band == NL80211_BAND_5GHZ)
 tbl->lq_type = LQ_A;
 else
 tbl->lq_type = LQ_G;
@@ -821,7 +821,7 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
 /* Mask with station rate restriction */
 if (is_legacy(tbl->lq_type)) {
 /* supp_rates has no CCK bits in A mode */
-if (lq_sta->band == IEEE80211_BAND_5GHZ)
+if (lq_sta->band == NL80211_BAND_5GHZ)
 rate_mask = (u16)(rate_mask &
 (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
 else
@@ -939,7 +939,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
 table = &lq_sta->lq;
 tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
 rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, &rs_index);
-if (priv->band == IEEE80211_BAND_5GHZ)
+if (priv->band == NL80211_BAND_5GHZ)
 rs_index -= IWL_FIRST_OFDM_RATE;
 mac_flags = info->status.rates[0].flags;
 mac_index = info->status.rates[0].idx;
@@ -952,7 +952,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
 * mac80211 HT index is always zero-indexed; we need to move
 * HT OFDM rates after CCK rates in 2.4 GHz band
 */
-if (priv->band == IEEE80211_BAND_2GHZ)
+if (priv->band == NL80211_BAND_2GHZ)
 mac_index += IWL_FIRST_OFDM_RATE;
 }
 /* Here we actually compare this rate to the latest LQ command */
@@ -2284,7 +2284,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
 
 /* mask with station rate restriction */
 if (is_legacy(tbl->lq_type)) {
-if (lq_sta->band == IEEE80211_BAND_5GHZ)
+if (lq_sta->band == NL80211_BAND_5GHZ)
 /* supp_rates has no CCK bits in A mode */
 rate_scale_index_msk = (u16) (rate_mask &
 (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
@@ -2721,7 +2721,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
 /* Get max rate if user set max rate */
 if (lq_sta) {
 lq_sta->max_rate_idx = txrc->max_rate_idx;
-if ((sband->band == IEEE80211_BAND_5GHZ) &&
+if ((sband->band == NL80211_BAND_5GHZ) &&
 (lq_sta->max_rate_idx != -1))
 lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE;
 if ((lq_sta->max_rate_idx < 0) ||
@@ -2763,11 +2763,11 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
 } else {
 /* Check for invalid rates */
 if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) ||
-((sband->band == IEEE80211_BAND_5GHZ) &&
+((sband->band == NL80211_BAND_5GHZ) &&
 (rate_idx < IWL_FIRST_OFDM_RATE)))
 rate_idx = rate_lowest_index(sband, sta);
 /* On valid 5 GHz rate, adjust index */
-else if (sband->band == IEEE80211_BAND_5GHZ)
+else if (sband->band == NL80211_BAND_5GHZ)
 rate_idx -= IWL_FIRST_OFDM_RATE;
 info->control.rates[0].flags = 0;
 }
@@ -2880,7 +2880,7 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
 
 /* Set last_txrate_idx to lowest rate */
 lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
-if (sband->band == IEEE80211_BAND_5GHZ)
+if (sband->band == NL80211_BAND_5GHZ)
 lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
 lq_sta->is_agg = 0;
 #ifdef CONFIG_MAC80211_DEBUGFS
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.h b/drivers/net/wireless/intel/iwlwifi/dvm/rs.h
index c5fe44584613..50c1e951dd2d 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.h
@@ -355,7 +355,7 @@ struct iwl_lq_sta {
 u8 action_counter; /* # mode-switch actions tried */
 u8 is_green;
 u8 is_dup;
-enum ieee80211_band band;
+enum nl80211_band band;
 
 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
 u32 supp_rates;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
index 52ab1e012e8f..dfa2041cfdac 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
@@ -686,7 +686,7 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
 
 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
 
-ieee80211_rx_napi(priv->hw, skb, priv->napi);
+ieee80211_rx_napi(priv->hw, NULL, skb, priv->napi);
 }
 
 static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
@@ -834,7 +834,7 @@ static void iwlagn_rx_reply_rx(struct iwl_priv *priv,
 /* rx_status carries information about the packet to mac80211 */
 rx_status.mactime = le64_to_cpu(phy_res->timestamp);
 rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
-IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
 rx_status.freq =
 ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
 rx_status.band);
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
index 2d47cb24c48b..b228552184b5 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
@@ -719,7 +719,7 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
 void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
 struct iwl_rxon_context *ctx)
 {
-enum ieee80211_band band = ch->band;
+enum nl80211_band band = ch->band;
 u16 channel = ch->hw_value;
 
 if ((le16_to_cpu(ctx->staging.channel) == channel) &&
@@ -727,7 +727,7 @@ void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
 return;
 
 ctx->staging.channel = cpu_to_le16(channel);
-if (band == IEEE80211_BAND_5GHZ)
+if (band == NL80211_BAND_5GHZ)
 ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
 else
 ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
@@ -740,10 +740,10 @@ void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
 
 void iwl_set_flags_for_band(struct iwl_priv *priv,
 struct iwl_rxon_context *ctx,
-enum ieee80211_band band,
+enum nl80211_band band,
 struct ieee80211_vif *vif)
 {
-if (band == IEEE80211_BAND_5GHZ) {
+if (band == NL80211_BAND_5GHZ) {
 ctx->staging.flags &=
 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
 | RXON_FLG_CCK_MSK);
@@ -1476,7 +1476,7 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
 
 iwlagn_set_rxon_chain(priv, ctx);
 
-if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
+if (bss_conf->use_cts_prot && (priv->band != NL80211_BAND_5GHZ))
 ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
 else
 ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
index 81a2ddbe9569..d01766f16175 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
@@ -312,7 +312,7 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
 scan_notif->tsf_high, scan_notif->status);
 
 IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n",
-(priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
+(priv->scan_band == NL80211_BAND_2GHZ) ? "2.4" : "5.2",
 jiffies_to_msecs(jiffies - priv->scan_start));
 
 /*
@@ -362,9 +362,9 @@ void iwl_setup_rx_scan_handlers(struct iwl_priv *priv)
 }
 
 static u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
-enum ieee80211_band band, u8 n_probes)
+enum nl80211_band band, u8 n_probes)
 {
-if (band == IEEE80211_BAND_5GHZ)
+if (band == NL80211_BAND_5GHZ)
 return IWL_ACTIVE_DWELL_TIME_52 +
 IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
 else
@@ -431,9 +431,9 @@ static u16 iwl_limit_dwell(struct iwl_priv *priv, u16 dwell_time)
 }
 
 static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
-enum ieee80211_band band)
+enum nl80211_band band)
 {
-u16 passive = (band == IEEE80211_BAND_2GHZ) ?
+u16 passive = (band == NL80211_BAND_2GHZ) ?
 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
 
@@ -442,7 +442,7 @@ static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
 
 /* Return valid, unused, channel for a passive scan to reset the RF */
 static u8 iwl_get_single_channel_number(struct iwl_priv *priv,
-enum ieee80211_band band)
+enum nl80211_band band)
 {
 struct ieee80211_supported_band *sband = priv->hw->wiphy->bands[band];
 struct iwl_rxon_context *ctx;
@@ -470,7 +470,7 @@ static u8 iwl_get_single_channel_number(struct iwl_priv *priv,
 
 static int iwl_get_channel_for_reset_scan(struct iwl_priv *priv,
 struct ieee80211_vif *vif,
-enum ieee80211_band band,
+enum nl80211_band band,
 struct iwl_scan_channel *scan_ch)
 {
 const struct ieee80211_supported_band *sband;
@@ -492,7 +492,7 @@ static int iwl_get_channel_for_reset_scan(struct iwl_priv *priv,
 cpu_to_le16(IWL_RADIO_RESET_DWELL_TIME);
 /* Set txpower levels to defaults */
 scan_ch->dsp_atten = 110;
-if (band == IEEE80211_BAND_5GHZ)
+if (band == NL80211_BAND_5GHZ)
 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
 else
 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
@@ -505,7 +505,7 @@ static int iwl_get_channel_for_reset_scan(struct iwl_priv *priv,
 
 static int iwl_get_channels_for_scan(struct iwl_priv *priv,
 struct ieee80211_vif *vif,
-enum ieee80211_band band,
+enum nl80211_band band,
 u8 is_active, u8 n_probes,
 struct iwl_scan_channel *scan_ch)
 {
@@ -553,7 +553,7 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
 * power level:
 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
 */
-if (band == IEEE80211_BAND_5GHZ)
+if (band == NL80211_BAND_5GHZ)
 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
 else
 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
@@ -636,7 +636,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
 u32 rate_flags = 0;
 u16 cmd_len = 0;
 u16 rx_chain = 0;
-enum ieee80211_band band;
+enum nl80211_band band;
 u8 n_probes = 0;
 u8 rx_ant = priv->nvm_data->valid_rx_ant;
 u8 rate;
@@ -750,7 +750,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
 
 switch (priv->scan_band) {
-case IEEE80211_BAND_2GHZ:
+case NL80211_BAND_2GHZ:
 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
 chan_mod = le32_to_cpu(
 priv->contexts[IWL_RXON_CTX_BSS].active.flags &
@@ -771,7 +771,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
 priv->lib->bt_params->advanced_bt_coexist)
 scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT;
 break;
-case IEEE80211_BAND_5GHZ:
+case NL80211_BAND_5GHZ:
 rate = IWL_RATE_6M_PLCP;
 break;
 default:
@@ -809,7 +809,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
 
 band = priv->scan_band;
 
-if (band == IEEE80211_BAND_2GHZ &&
+if (band == NL80211_BAND_2GHZ &&
 priv->lib->bt_params &&
 priv->lib->bt_params->advanced_bt_coexist) {
 /* transmit 2.4 GHz probes only on first antenna */
@@ -925,16 +925,16 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
 void iwl_init_scan_params(struct iwl_priv *priv)
 {
 u8 ant_idx = fls(priv->nvm_data->valid_tx_ant) - 1;
-if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
-priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
-if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
-priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
+if (!priv->scan_tx_ant[NL80211_BAND_5GHZ])
+priv->scan_tx_ant[NL80211_BAND_5GHZ] = ant_idx;
+if (!priv->scan_tx_ant[NL80211_BAND_2GHZ])
+priv->scan_tx_ant[NL80211_BAND_2GHZ] = ant_idx;
 }
 
 int __must_check iwl_scan_initiate(struct iwl_priv *priv,
 struct ieee80211_vif *vif,
 enum iwl_scan_type scan_type,
-enum ieee80211_band band)
+enum nl80211_band band)
 {
 int ret;
 
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
index 8e9768a553e4..de6ec9b7ace4 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
@@ -579,7 +579,7 @@ static void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
 
 /* Set up the rate scaling to start at selected rate, fall back
 * all the way down to 1M in IEEE order, and then spin on 1M */
-if (priv->band == IEEE80211_BAND_5GHZ)
+if (priv->band == NL80211_BAND_5GHZ)
 r = IWL_RATE_6M_INDEX;
 else if (ctx && ctx->vif && ctx->vif->p2p)
 r = IWL_RATE_6M_INDEX;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
index 59e2001c39f8..4b97371c3b42 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
@@ -81,7 +81,7 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
 tx_flags |= TX_CMD_FLG_TSF_MSK;
 else if (ieee80211_is_back_req(fc))
 tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
-else if (info->band == IEEE80211_BAND_2GHZ &&
+else if (info->band == NL80211_BAND_2GHZ &&
 priv->lib->bt_params &&
 priv->lib->bt_params->advanced_bt_coexist &&
 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
@@ -177,7 +177,7 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
 rate_idx = rate_lowest_index(
 &priv->nvm_data->bands[info->band], sta);
 /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
-if (info->band == IEEE80211_BAND_5GHZ)
+if (info->band == NL80211_BAND_5GHZ)
 rate_idx += IWL_FIRST_OFDM_RATE;
 /* Get PLCP rate for tx_cmd->rate_n_flags */
 rate_plcp = iwl_rates[rate_idx].plcp;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-1000.c b/drivers/net/wireless/intel/iwlwifi/iwl-1000.c
index ef22c3d168fc..5c2aae64d59f 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-1000.c
@@ -64,7 +64,7 @@ static const struct iwl_base_params iwl1000_base_params = {
 static const struct iwl_ht_params iwl1000_ht_params = {
 .ht_greenfield_support = true,
 .use_rts_for_aggregation = true, /* use rts/cts protection */
-.ht40_bands = BIT(IEEE80211_BAND_2GHZ),
+.ht40_bands = BIT(NL80211_BAND_2GHZ),
 };
 
 static const struct iwl_eeprom_params iwl1000_eeprom_params = {
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-2000.c b/drivers/net/wireless/intel/iwlwifi/iwl-2000.c
index dc246c997084..2e823bdc4757 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-2000.c
@@ -89,7 +89,7 @@ static const struct iwl_base_params iwl2030_base_params = {
 static const struct iwl_ht_params iwl2000_ht_params = {
 .ht_greenfield_support = true,
 .use_rts_for_aggregation = true, /* use rts/cts protection */
-.ht40_bands = BIT(IEEE80211_BAND_2GHZ),
+.ht40_bands = BIT(NL80211_BAND_2GHZ),
 };
 
 static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-5000.c b/drivers/net/wireless/intel/iwlwifi/iwl-5000.c
index 4dcdab6781cc..4c3e3cf4c799 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-5000.c
@@ -62,7 +62,7 @@ static const struct iwl_base_params iwl5000_base_params = {
 
 static const struct iwl_ht_params iwl5000_ht_params = {
 .ht_greenfield_support = true,
-.ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
+.ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
 };
 
 static const struct iwl_eeprom_params iwl5000_eeprom_params = {
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-6000.c b/drivers/net/wireless/intel/iwlwifi/iwl-6000.c
index 9938f5340ac0..5a7b7e1f0aab 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-6000.c
@@ -110,7 +110,7 @@ static const struct iwl_base_params iwl6000_g2_base_params = {
 static const struct iwl_ht_params iwl6000_ht_params = {
 .ht_greenfield_support = true,
 .use_rts_for_aggregation = true, /* use rts/cts protection */
-.ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
+.ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
 };
 
 static const struct iwl_eeprom_params iwl6000_eeprom_params = {
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
index b6283c881d42..abd2904ecc48 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
@@ -156,7 +156,7 @@ static const struct iwl_tt_params iwl7000_high_temp_tt_params = {
 
 static const struct iwl_ht_params iwl7000_ht_params = {
 .stbc = true,
-.ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
+.ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
 };
 
 #define IWL_DEVICE_7000_COMMON \
@@ -287,7 +287,7 @@ static const struct iwl_pwr_tx_backoff iwl7265_pwr_tx_backoffs[] = {
 static const struct iwl_ht_params iwl7265_ht_params = {
 .stbc = true,
 .ldpc = true,
-.ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
+.ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
 };
 
 const struct iwl_cfg iwl3165_2ac_cfg = {
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
index 0728a288aa3d..a9212a12f4da 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
@@ -124,7 +124,7 @@ static const struct iwl_base_params iwl8000_base_params = {
 static const struct iwl_ht_params iwl8000_ht_params = {
 .stbc = true,
 .ldpc = true,
-.ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
+.ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
 };
 
 static const struct iwl_tt_params iwl8000_tt_params = {
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
index a3d35aa291a9..b9aca3795f06 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
@@ -93,7 +93,7 @@ static const struct iwl_base_params iwl9000_base_params = {
 static const struct iwl_ht_params iwl9000_ht_params = {
 .stbc = true,
 .ldpc = true,
-.ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
+.ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
 };
 
 static const struct iwl_tt_params iwl9000_tt_params = {
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 08bb4f4e424a..720679889ab3 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -185,7 +185,7 @@ struct iwl_base_params {
 * @stbc: support Tx STBC and 1*SS Rx STBC
 * @ldpc: support Tx/Rx with LDPC
 * @use_rts_for_aggregation: use rts/cts protection for HT traffic
-* @ht40_bands: bitmap of bands (using %IEEE80211_BAND_*) that support HT40
+* @ht40_bands: bitmap of bands (using %NL80211_BAND_*) that support HT40
 */
 struct iwl_ht_params {
 enum ieee80211_smps_mode smps_mode;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
index c15f5be85197..bf1b69aec813 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
@@ -390,10 +390,10 @@ iwl_eeprom_enh_txp_read_element(struct iwl_nvm_data *data,
390 int n_channels, s8 max_txpower_avg) 390 int n_channels, s8 max_txpower_avg)
391{ 391{
392 int ch_idx; 392 int ch_idx;
393 enum ieee80211_band band; 393 enum nl80211_band band;
394 394
395 band = txp->flags & IWL_EEPROM_ENH_TXP_FL_BAND_52G ? 395 band = txp->flags & IWL_EEPROM_ENH_TXP_FL_BAND_52G ?
396 IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ; 396 NL80211_BAND_5GHZ : NL80211_BAND_2GHZ;
397 397
398 for (ch_idx = 0; ch_idx < n_channels; ch_idx++) { 398 for (ch_idx = 0; ch_idx < n_channels; ch_idx++) {
399 struct ieee80211_channel *chan = &data->channels[ch_idx]; 399 struct ieee80211_channel *chan = &data->channels[ch_idx];
@@ -526,7 +526,7 @@ static void iwl_init_band_reference(const struct iwl_cfg *cfg,
526 526
527static void iwl_mod_ht40_chan_info(struct device *dev, 527static void iwl_mod_ht40_chan_info(struct device *dev,
528 struct iwl_nvm_data *data, int n_channels, 528 struct iwl_nvm_data *data, int n_channels,
529 enum ieee80211_band band, u16 channel, 529 enum nl80211_band band, u16 channel,
530 const struct iwl_eeprom_channel *eeprom_ch, 530 const struct iwl_eeprom_channel *eeprom_ch,
531 u8 clear_ht40_extension_channel) 531 u8 clear_ht40_extension_channel)
532{ 532{
@@ -548,7 +548,7 @@ static void iwl_mod_ht40_chan_info(struct device *dev,
548 IWL_DEBUG_EEPROM(dev, 548 IWL_DEBUG_EEPROM(dev,
549 "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n", 549 "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
550 channel, 550 channel,
551 band == IEEE80211_BAND_5GHZ ? "5.2" : "2.4", 551 band == NL80211_BAND_5GHZ ? "5.2" : "2.4",
552 CHECK_AND_PRINT(IBSS), 552 CHECK_AND_PRINT(IBSS),
553 CHECK_AND_PRINT(ACTIVE), 553 CHECK_AND_PRINT(ACTIVE),
554 CHECK_AND_PRINT(RADAR), 554 CHECK_AND_PRINT(RADAR),
@@ -606,8 +606,8 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
606 n_channels++; 606 n_channels++;
607 607
608 channel->hw_value = eeprom_ch_array[ch_idx]; 608 channel->hw_value = eeprom_ch_array[ch_idx];
609 channel->band = (band == 1) ? IEEE80211_BAND_2GHZ 609 channel->band = (band == 1) ? NL80211_BAND_2GHZ
610 : IEEE80211_BAND_5GHZ; 610 : NL80211_BAND_5GHZ;
611 channel->center_freq = 611 channel->center_freq =
612 ieee80211_channel_to_frequency( 612 ieee80211_channel_to_frequency(
613 channel->hw_value, channel->band); 613 channel->hw_value, channel->band);
@@ -677,15 +677,15 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
677 677
678 /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */ 678 /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
679 for (band = 6; band <= 7; band++) { 679 for (band = 6; band <= 7; band++) {
680 enum ieee80211_band ieeeband; 680 enum nl80211_band ieeeband;
681 681
682 iwl_init_band_reference(cfg, eeprom, eeprom_size, band, 682 iwl_init_band_reference(cfg, eeprom, eeprom_size, band,
683 &eeprom_ch_count, &eeprom_ch_info, 683 &eeprom_ch_count, &eeprom_ch_info,
684 &eeprom_ch_array); 684 &eeprom_ch_array);
685 685
686 /* EEPROM band 6 is 2.4, band 7 is 5 GHz */ 686 /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
687 ieeeband = (band == 6) ? IEEE80211_BAND_2GHZ 687 ieeeband = (band == 6) ? NL80211_BAND_2GHZ
688 : IEEE80211_BAND_5GHZ; 688 : NL80211_BAND_5GHZ;
689 689
690 /* Loop through each band adding each of the channels */ 690 /* Loop through each band adding each of the channels */
691 for (ch_idx = 0; ch_idx < eeprom_ch_count; ch_idx++) { 691 for (ch_idx = 0; ch_idx < eeprom_ch_count; ch_idx++) {
@@ -708,7 +708,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
708 708
709int iwl_init_sband_channels(struct iwl_nvm_data *data, 709int iwl_init_sband_channels(struct iwl_nvm_data *data,
710 struct ieee80211_supported_band *sband, 710 struct ieee80211_supported_band *sband,
711 int n_channels, enum ieee80211_band band) 711 int n_channels, enum nl80211_band band)
712{ 712{
713 struct ieee80211_channel *chan = &data->channels[0]; 713 struct ieee80211_channel *chan = &data->channels[0];
714 int n = 0, idx = 0; 714 int n = 0, idx = 0;
@@ -734,7 +734,7 @@ int iwl_init_sband_channels(struct iwl_nvm_data *data,
734void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg, 734void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
735 struct iwl_nvm_data *data, 735 struct iwl_nvm_data *data,
736 struct ieee80211_sta_ht_cap *ht_info, 736 struct ieee80211_sta_ht_cap *ht_info,
737 enum ieee80211_band band, 737 enum nl80211_band band,
738 u8 tx_chains, u8 rx_chains) 738 u8 tx_chains, u8 rx_chains)
739{ 739{
740 int max_bit_rate = 0; 740 int max_bit_rate = 0;
@@ -813,22 +813,22 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
813 int n_used = 0; 813 int n_used = 0;
814 struct ieee80211_supported_band *sband; 814 struct ieee80211_supported_band *sband;
815 815
816 sband = &data->bands[IEEE80211_BAND_2GHZ]; 816 sband = &data->bands[NL80211_BAND_2GHZ];
817 sband->band = IEEE80211_BAND_2GHZ; 817 sband->band = NL80211_BAND_2GHZ;
818 sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS]; 818 sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
819 sband->n_bitrates = N_RATES_24; 819 sband->n_bitrates = N_RATES_24;
820 n_used += iwl_init_sband_channels(data, sband, n_channels, 820 n_used += iwl_init_sband_channels(data, sband, n_channels,
821 IEEE80211_BAND_2GHZ); 821 NL80211_BAND_2GHZ);
822 iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ, 822 iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_2GHZ,
823 data->valid_tx_ant, data->valid_rx_ant); 823 data->valid_tx_ant, data->valid_rx_ant);
824 824
825 sband = &data->bands[IEEE80211_BAND_5GHZ]; 825 sband = &data->bands[NL80211_BAND_5GHZ];
826 sband->band = IEEE80211_BAND_5GHZ; 826 sband->band = NL80211_BAND_5GHZ;
827 sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS]; 827 sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS];
828 sband->n_bitrates = N_RATES_52; 828 sband->n_bitrates = N_RATES_52;
829 n_used += iwl_init_sband_channels(data, sband, n_channels, 829 n_used += iwl_init_sband_channels(data, sband, n_channels,
830 IEEE80211_BAND_5GHZ); 830 NL80211_BAND_5GHZ);
831 iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ, 831 iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_5GHZ,
832 data->valid_tx_ant, data->valid_rx_ant); 832 data->valid_tx_ant, data->valid_rx_ant);
833 833
834 if (n_channels != n_used) 834 if (n_channels != n_used)
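Note: the hunks above are representative of the whole series. Every IEEE80211_BAND_* enumerator is replaced one-for-one by its NL80211_BAND_* counterpart (and IEEE80211_NUM_BANDS by NUM_NL80211_BANDS), so band-indexed arrays such as data->bands[] keep working unchanged. A minimal sketch of code written against the new enum, under that assumption; the helper name is made up for illustration and is not part of the patch:

    #include <net/cfg80211.h>	/* enum nl80211_band */

    /* Sketch only: name a band the way the hunks above index by band. */
    static const char *band_name(enum nl80211_band band)
    {
    	switch (band) {
    	case NL80211_BAND_2GHZ:
    		return "2.4 GHz";
    	case NL80211_BAND_5GHZ:
    		return "5 GHz";
    	default:
    		return "other";
    	}
    }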
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
index ad2b834668ff..53f39a34eca2 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
@@ -98,7 +98,7 @@ struct iwl_nvm_data {
98 s8 max_tx_pwr_half_dbm; 98 s8 max_tx_pwr_half_dbm;
99 99
100 bool lar_enabled; 100 bool lar_enabled;
101 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; 101 struct ieee80211_supported_band bands[NUM_NL80211_BANDS];
102 struct ieee80211_channel channels[]; 102 struct ieee80211_channel channels[];
103}; 103};
104 104
@@ -133,12 +133,12 @@ int iwl_nvm_check_version(struct iwl_nvm_data *data,
133 133
134int iwl_init_sband_channels(struct iwl_nvm_data *data, 134int iwl_init_sband_channels(struct iwl_nvm_data *data,
135 struct ieee80211_supported_band *sband, 135 struct ieee80211_supported_band *sband,
136 int n_channels, enum ieee80211_band band); 136 int n_channels, enum nl80211_band band);
137 137
138void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg, 138void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
139 struct iwl_nvm_data *data, 139 struct iwl_nvm_data *data,
140 struct ieee80211_sta_ht_cap *ht_info, 140 struct ieee80211_sta_ht_cap *ht_info,
141 enum ieee80211_band band, 141 enum nl80211_band band,
142 u8 tx_chains, u8 rx_chains); 142 u8 tx_chains, u8 rx_chains);
143 143
144#endif /* __iwl_eeprom_parse_h__ */ 144#endif /* __iwl_eeprom_parse_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 93a689583dff..14743c37d976 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -308,7 +308,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
308 308
309 channel->hw_value = nvm_chan[ch_idx]; 309 channel->hw_value = nvm_chan[ch_idx];
310 channel->band = (ch_idx < num_2ghz_channels) ? 310 channel->band = (ch_idx < num_2ghz_channels) ?
311 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; 311 NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
312 channel->center_freq = 312 channel->center_freq =
313 ieee80211_channel_to_frequency( 313 ieee80211_channel_to_frequency(
314 channel->hw_value, channel->band); 314 channel->hw_value, channel->band);
@@ -320,7 +320,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
320 * is not used in mvm, and is used for backwards compatibility 320 * is not used in mvm, and is used for backwards compatibility
321 */ 321 */
322 channel->max_power = IWL_DEFAULT_MAX_TX_POWER; 322 channel->max_power = IWL_DEFAULT_MAX_TX_POWER;
323 is_5ghz = channel->band == IEEE80211_BAND_5GHZ; 323 is_5ghz = channel->band == NL80211_BAND_5GHZ;
324 324
325 /* don't put limitations in case we're using LAR */ 325 /* don't put limitations in case we're using LAR */
326 if (!lar_supported) 326 if (!lar_supported)
@@ -439,22 +439,22 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
439 &ch_section[NVM_CHANNELS_FAMILY_8000], 439 &ch_section[NVM_CHANNELS_FAMILY_8000],
440 lar_supported); 440 lar_supported);
441 441
442 sband = &data->bands[IEEE80211_BAND_2GHZ]; 442 sband = &data->bands[NL80211_BAND_2GHZ];
443 sband->band = IEEE80211_BAND_2GHZ; 443 sband->band = NL80211_BAND_2GHZ;
444 sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS]; 444 sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
445 sband->n_bitrates = N_RATES_24; 445 sband->n_bitrates = N_RATES_24;
446 n_used += iwl_init_sband_channels(data, sband, n_channels, 446 n_used += iwl_init_sband_channels(data, sband, n_channels,
447 IEEE80211_BAND_2GHZ); 447 NL80211_BAND_2GHZ);
448 iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ, 448 iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_2GHZ,
449 tx_chains, rx_chains); 449 tx_chains, rx_chains);
450 450
451 sband = &data->bands[IEEE80211_BAND_5GHZ]; 451 sband = &data->bands[NL80211_BAND_5GHZ];
452 sband->band = IEEE80211_BAND_5GHZ; 452 sband->band = NL80211_BAND_5GHZ;
453 sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS]; 453 sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS];
454 sband->n_bitrates = N_RATES_52; 454 sband->n_bitrates = N_RATES_52;
455 n_used += iwl_init_sband_channels(data, sband, n_channels, 455 n_used += iwl_init_sband_channels(data, sband, n_channels,
456 IEEE80211_BAND_5GHZ); 456 NL80211_BAND_5GHZ);
457 iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ, 457 iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_5GHZ,
458 tx_chains, rx_chains); 458 tx_chains, rx_chains);
459 if (data->sku_cap_11ac_enable && !iwlwifi_mod_params.disable_11ac) 459 if (data->sku_cap_11ac_enable && !iwlwifi_mod_params.disable_11ac)
460 iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap, 460 iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap,
@@ -781,7 +781,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
781 struct ieee80211_regdomain *regd; 781 struct ieee80211_regdomain *regd;
782 int size_of_regd; 782 int size_of_regd;
783 struct ieee80211_reg_rule *rule; 783 struct ieee80211_reg_rule *rule;
784 enum ieee80211_band band; 784 enum nl80211_band band;
785 int center_freq, prev_center_freq = 0; 785 int center_freq, prev_center_freq = 0;
786 int valid_rules = 0; 786 int valid_rules = 0;
787 bool new_rule; 787 bool new_rule;
@@ -809,7 +809,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
809 for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) { 809 for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
810 ch_flags = (u16)__le32_to_cpup(channels + ch_idx); 810 ch_flags = (u16)__le32_to_cpup(channels + ch_idx);
811 band = (ch_idx < NUM_2GHZ_CHANNELS) ? 811 band = (ch_idx < NUM_2GHZ_CHANNELS) ?
812 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; 812 NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
813 center_freq = ieee80211_channel_to_frequency(nvm_chan[ch_idx], 813 center_freq = ieee80211_channel_to_frequency(nvm_chan[ch_idx],
814 band); 814 band);
815 new_rule = false; 815 new_rule = false;
@@ -857,7 +857,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
857 IWL_DEBUG_DEV(dev, IWL_DL_LAR, 857 IWL_DEBUG_DEV(dev, IWL_DL_LAR,
858 "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s(0x%02x): Ad-Hoc %ssupported\n", 858 "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s(0x%02x): Ad-Hoc %ssupported\n",
859 center_freq, 859 center_freq,
860 band == IEEE80211_BAND_5GHZ ? "5.2" : "2.4", 860 band == NL80211_BAND_5GHZ ? "5.2" : "2.4",
861 CHECK_AND_PRINT_I(VALID), 861 CHECK_AND_PRINT_I(VALID),
862 CHECK_AND_PRINT_I(ACTIVE), 862 CHECK_AND_PRINT_I(ACTIVE),
863 CHECK_AND_PRINT_I(RADAR), 863 CHECK_AND_PRINT_I(RADAR),
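Note: several hunks in this file pass the new band value straight into ieee80211_channel_to_frequency(), which needs the band to map a channel number onto a centre frequency in MHz. A short sketch of that call as these hunks use it; the helper name is illustrative only, and the "channels 1-14 are 2.4 GHz" split mirrors the checks used elsewhere in the series:

    #include <net/cfg80211.h>

    /* Sketch: treat channels 1..14 as 2.4 GHz, everything else as 5 GHz,
     * then let the cfg80211 helper compute the centre frequency in MHz. */
    static int chan_to_freq(u8 channel)
    {
    	enum nl80211_band band = channel > 14 ? NL80211_BAND_5GHZ
    					      : NL80211_BAND_2GHZ;

    	return ieee80211_channel_to_frequency(channel, band);
    }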
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
index 35cdeca3d61e..a63f5bbb1ba7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
@@ -378,7 +378,7 @@ iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
378 chanctx_conf = rcu_dereference(vif->chanctx_conf); 378 chanctx_conf = rcu_dereference(vif->chanctx_conf);
379 379
380 if (!chanctx_conf || 380 if (!chanctx_conf ||
381 chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) { 381 chanctx_conf->def.chan->band != NL80211_BAND_2GHZ) {
382 rcu_read_unlock(); 382 rcu_read_unlock();
383 return BT_COEX_INVALID_LUT; 383 return BT_COEX_INVALID_LUT;
384 } 384 }
@@ -537,7 +537,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
537 537
538 /* If channel context is invalid or not on 2.4GHz .. */ 538 /* If channel context is invalid or not on 2.4GHz .. */
539 if ((!chanctx_conf || 539 if ((!chanctx_conf ||
540 chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ)) { 540 chanctx_conf->def.chan->band != NL80211_BAND_2GHZ)) {
541 if (vif->type == NL80211_IFTYPE_STATION) { 541 if (vif->type == NL80211_IFTYPE_STATION) {
542 /* ... relax constraints and disable rssi events */ 542 /* ... relax constraints and disable rssi events */
543 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, 543 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
@@ -857,11 +857,11 @@ bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm)
857} 857}
858 858
859bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm, 859bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
860 enum ieee80211_band band) 860 enum nl80211_band band)
861{ 861{
862 u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading); 862 u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
863 863
864 if (band != IEEE80211_BAND_2GHZ) 864 if (band != NL80211_BAND_2GHZ)
865 return false; 865 return false;
866 866
867 return bt_activity >= BT_LOW_TRAFFIC; 867 return bt_activity >= BT_LOW_TRAFFIC;
@@ -873,7 +873,7 @@ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
873 __le16 fc = hdr->frame_control; 873 __le16 fc = hdr->frame_control;
874 bool mplut_enabled = iwl_mvm_is_mplut_supported(mvm); 874 bool mplut_enabled = iwl_mvm_is_mplut_supported(mvm);
875 875
876 if (info->band != IEEE80211_BAND_2GHZ) 876 if (info->band != NL80211_BAND_2GHZ)
877 return 0; 877 return 0;
878 878
879 if (unlikely(mvm->bt_tx_prio)) 879 if (unlikely(mvm->bt_tx_prio))
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index 3a279d3403ef..fb96bc00f022 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -724,9 +724,9 @@ static ssize_t iwl_dbgfs_tof_responder_params_write(struct ieee80211_vif *vif,
724 724
725 ret = kstrtou32(data, 10, &value); 725 ret = kstrtou32(data, 10, &value);
726 if (ret == 0 && value) { 726 if (ret == 0 && value) {
727 enum ieee80211_band band = (cmd->channel_num <= 14) ? 727 enum nl80211_band band = (cmd->channel_num <= 14) ?
728 IEEE80211_BAND_2GHZ : 728 NL80211_BAND_2GHZ :
729 IEEE80211_BAND_5GHZ; 729 NL80211_BAND_5GHZ;
730 struct ieee80211_channel chn = { 730 struct ieee80211_channel chn = {
731 .band = band, 731 .band = band,
732 .center_freq = ieee80211_channel_to_frequency( 732 .center_freq = ieee80211_channel_to_frequency(
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 6ad5c602e84c..9e97cf4ff1c5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -980,7 +980,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
980 goto error; 980 goto error;
981 981
982 /* Add all the PHY contexts */ 982 /* Add all the PHY contexts */
983 chan = &mvm->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels[0]; 983 chan = &mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[0];
984 cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT); 984 cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
985 for (i = 0; i < NUM_PHY_CTX; i++) { 985 for (i = 0; i < NUM_PHY_CTX; i++) {
986 /* 986 /*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 5f950568e92c..456067b2f48d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -559,7 +559,7 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
559 559
560static void iwl_mvm_ack_rates(struct iwl_mvm *mvm, 560static void iwl_mvm_ack_rates(struct iwl_mvm *mvm,
561 struct ieee80211_vif *vif, 561 struct ieee80211_vif *vif,
562 enum ieee80211_band band, 562 enum nl80211_band band,
563 u8 *cck_rates, u8 *ofdm_rates) 563 u8 *cck_rates, u8 *ofdm_rates)
564{ 564{
565 struct ieee80211_supported_band *sband; 565 struct ieee80211_supported_band *sband;
@@ -730,7 +730,7 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
730 rcu_read_lock(); 730 rcu_read_lock();
731 chanctx = rcu_dereference(vif->chanctx_conf); 731 chanctx = rcu_dereference(vif->chanctx_conf);
732 iwl_mvm_ack_rates(mvm, vif, chanctx ? chanctx->def.chan->band 732 iwl_mvm_ack_rates(mvm, vif, chanctx ? chanctx->def.chan->band
733 : IEEE80211_BAND_2GHZ, 733 : NL80211_BAND_2GHZ,
734 &cck_ack_rates, &ofdm_ack_rates); 734 &cck_ack_rates, &ofdm_ack_rates);
735 rcu_read_unlock(); 735 rcu_read_unlock();
736 736
@@ -1065,7 +1065,7 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
1065 cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) << 1065 cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) <<
1066 RATE_MCS_ANT_POS); 1066 RATE_MCS_ANT_POS);
1067 1067
1068 if (info->band == IEEE80211_BAND_5GHZ || vif->p2p) { 1068 if (info->band == NL80211_BAND_5GHZ || vif->p2p) {
1069 rate = IWL_FIRST_OFDM_RATE; 1069 rate = IWL_FIRST_OFDM_RATE;
1070 } else { 1070 } else {
1071 rate = IWL_FIRST_CCK_RATE; 1071 rate = IWL_FIRST_CCK_RATE;
@@ -1516,7 +1516,7 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
1516 rx_status.device_timestamp = le32_to_cpu(sb->system_time); 1516 rx_status.device_timestamp = le32_to_cpu(sb->system_time);
1517 rx_status.band = 1517 rx_status.band =
1518 (sb->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ? 1518 (sb->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ?
1519 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; 1519 NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
1520 rx_status.freq = 1520 rx_status.freq =
1521 ieee80211_channel_to_frequency(le16_to_cpu(sb->channel), 1521 ieee80211_channel_to_frequency(le16_to_cpu(sb->channel),
1522 rx_status.band); 1522 rx_status.band);
@@ -1526,5 +1526,5 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
1526 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); 1526 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
1527 1527
1528 /* pass it as regular rx to mac80211 */ 1528 /* pass it as regular rx to mac80211 */
1529 ieee80211_rx_napi(mvm->hw, skb, NULL); 1529 ieee80211_rx_napi(mvm->hw, NULL, skb, NULL);
1530} 1530}
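Note: besides the band rename, the hunk above picks up a separate mac80211 API change from the same merge base: ieee80211_rx_napi() gained a second argument naming the station the frame belongs to, and callers that have not resolved the station pass NULL. A hedged sketch of the call on the new signature; the wrapper and its parameters are placeholders for illustration:

    #include <net/mac80211.h>

    /* Sketch: deliver a frame on the four-argument ieee80211_rx_napi();
     * the station is NULL because this caller has not resolved it. */
    static void deliver_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
    		       struct napi_struct *napi)
    {
    	ieee80211_rx_napi(hw, NULL, skb, napi);
    }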
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 4f5ec495b460..ef91b3770703 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -550,18 +550,18 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
550 else 550 else
551 mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS; 551 mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS;
552 552
553 if (mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels) 553 if (mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels)
554 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = 554 hw->wiphy->bands[NL80211_BAND_2GHZ] =
555 &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ]; 555 &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
556 if (mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels) { 556 if (mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels) {
557 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = 557 hw->wiphy->bands[NL80211_BAND_5GHZ] =
558 &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ]; 558 &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
559 559
560 if (fw_has_capa(&mvm->fw->ucode_capa, 560 if (fw_has_capa(&mvm->fw->ucode_capa,
561 IWL_UCODE_TLV_CAPA_BEAMFORMER) && 561 IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
562 fw_has_api(&mvm->fw->ucode_capa, 562 fw_has_api(&mvm->fw->ucode_capa,
563 IWL_UCODE_TLV_API_LQ_SS_PARAMS)) 563 IWL_UCODE_TLV_API_LQ_SS_PARAMS))
564 hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |= 564 hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap.cap |=
565 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE; 565 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
566 } 566 }
567 567
@@ -2911,7 +2911,7 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
2911 cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)), 2911 cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)),
2912 .sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id), 2912 .sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id),
2913 /* Set the channel info data */ 2913 /* Set the channel info data */
2914 .channel_info.band = (channel->band == IEEE80211_BAND_2GHZ) ? 2914 .channel_info.band = (channel->band == NL80211_BAND_2GHZ) ?
2915 PHY_BAND_24 : PHY_BAND_5, 2915 PHY_BAND_24 : PHY_BAND_5,
2916 .channel_info.channel = channel->hw_value, 2916 .channel_info.channel = channel->hw_value,
2917 .channel_info.width = PHY_VHT_CHANNEL_MODE20, 2917 .channel_info.width = PHY_VHT_CHANNEL_MODE20,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 2d685e02d488..85800ba0c667 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1133,9 +1133,9 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm);
1133 1133
1134/* Utils */ 1134/* Utils */
1135int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags, 1135int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
1136 enum ieee80211_band band); 1136 enum nl80211_band band);
1137void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags, 1137void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
1138 enum ieee80211_band band, 1138 enum nl80211_band band,
1139 struct ieee80211_tx_rate *r); 1139 struct ieee80211_tx_rate *r);
1140u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx); 1140u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
1141void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm); 1141void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
@@ -1468,7 +1468,7 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
1468bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant); 1468bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant);
1469bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm); 1469bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm);
1470bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm, 1470bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
1471 enum ieee80211_band band); 1471 enum nl80211_band band);
1472u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, 1472u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
1473 struct ieee80211_tx_info *info, u8 ac); 1473 struct ieee80211_tx_info *info, u8 ac);
1474 1474
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
index 6e6a56f2153d..95138830b9f8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
@@ -147,7 +147,7 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
147 u8 active_cnt, idle_cnt; 147 u8 active_cnt, idle_cnt;
148 148
149 /* Set the channel info data */ 149 /* Set the channel info data */
150 cmd->ci.band = (chandef->chan->band == IEEE80211_BAND_2GHZ ? 150 cmd->ci.band = (chandef->chan->band == NL80211_BAND_2GHZ ?
151 PHY_BAND_24 : PHY_BAND_5); 151 PHY_BAND_24 : PHY_BAND_5);
152 152
153 cmd->ci.channel = chandef->chan->hw_value; 153 cmd->ci.channel = chandef->chan->hw_value;
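Note: the firmware-facing code in phy-ctxt.c, tdls.c and mac80211.c only translates the cfg80211 band into iwlwifi's own PHY_BAND_24/PHY_BAND_5 constants; the rename changes the left-hand side of the comparison and nothing else. A small sketch of that translation; the helper name is invented for illustration, and PHY_BAND_* are assumed to come from the iwlwifi firmware API headers rather than being defined by this patch:

    #include <net/cfg80211.h>

    /* Sketch: map the nl80211 band onto the firmware band constant used
     * in iwlwifi channel-info commands (PHY_BAND_* from the fw headers). */
    static u8 iwl_phy_band_from_nl80211(enum nl80211_band band)
    {
    	return band == NL80211_BAND_2GHZ ? PHY_BAND_24 : PHY_BAND_5;
    }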
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 61d0a8cd13f9..81dd2f6a48a5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -829,7 +829,7 @@ static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm,
829 829
830/* Convert a ucode rate into an rs_rate object */ 830/* Convert a ucode rate into an rs_rate object */
831static int rs_rate_from_ucode_rate(const u32 ucode_rate, 831static int rs_rate_from_ucode_rate(const u32 ucode_rate,
832 enum ieee80211_band band, 832 enum nl80211_band band,
833 struct rs_rate *rate) 833 struct rs_rate *rate)
834{ 834{
835 u32 ant_msk = ucode_rate & RATE_MCS_ANT_ABC_MSK; 835 u32 ant_msk = ucode_rate & RATE_MCS_ANT_ABC_MSK;
@@ -848,7 +848,7 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
848 if (!(ucode_rate & RATE_MCS_HT_MSK) && 848 if (!(ucode_rate & RATE_MCS_HT_MSK) &&
849 !(ucode_rate & RATE_MCS_VHT_MSK)) { 849 !(ucode_rate & RATE_MCS_VHT_MSK)) {
850 if (num_of_ant == 1) { 850 if (num_of_ant == 1) {
851 if (band == IEEE80211_BAND_5GHZ) 851 if (band == NL80211_BAND_5GHZ)
852 rate->type = LQ_LEGACY_A; 852 rate->type = LQ_LEGACY_A;
853 else 853 else
854 rate->type = LQ_LEGACY_G; 854 rate->type = LQ_LEGACY_G;
@@ -1043,7 +1043,7 @@ static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
1043 return; 1043 return;
1044 } else if (is_siso(rate)) { 1044 } else if (is_siso(rate)) {
1045 /* Downgrade to Legacy if we were in SISO */ 1045 /* Downgrade to Legacy if we were in SISO */
1046 if (lq_sta->band == IEEE80211_BAND_5GHZ) 1046 if (lq_sta->band == NL80211_BAND_5GHZ)
1047 rate->type = LQ_LEGACY_A; 1047 rate->type = LQ_LEGACY_A;
1048 else 1048 else
1049 rate->type = LQ_LEGACY_G; 1049 rate->type = LQ_LEGACY_G;
@@ -1850,7 +1850,7 @@ static int rs_switch_to_column(struct iwl_mvm *mvm,
1850 rate->ant = column->ant; 1850 rate->ant = column->ant;
1851 1851
1852 if (column->mode == RS_LEGACY) { 1852 if (column->mode == RS_LEGACY) {
1853 if (lq_sta->band == IEEE80211_BAND_5GHZ) 1853 if (lq_sta->band == NL80211_BAND_5GHZ)
1854 rate->type = LQ_LEGACY_A; 1854 rate->type = LQ_LEGACY_A;
1855 else 1855 else
1856 rate->type = LQ_LEGACY_G; 1856 rate->type = LQ_LEGACY_G;
@@ -2020,7 +2020,7 @@ static void rs_get_adjacent_txp(struct iwl_mvm *mvm, int index,
2020} 2020}
2021 2021
2022static bool rs_tpc_allowed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 2022static bool rs_tpc_allowed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2023 struct rs_rate *rate, enum ieee80211_band band) 2023 struct rs_rate *rate, enum nl80211_band band)
2024{ 2024{
2025 int index = rate->index; 2025 int index = rate->index;
2026 bool cam = (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM); 2026 bool cam = (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM);
@@ -2126,7 +2126,7 @@ static bool rs_tpc_perform(struct iwl_mvm *mvm,
2126 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 2126 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2127 struct ieee80211_vif *vif = mvm_sta->vif; 2127 struct ieee80211_vif *vif = mvm_sta->vif;
2128 struct ieee80211_chanctx_conf *chanctx_conf; 2128 struct ieee80211_chanctx_conf *chanctx_conf;
2129 enum ieee80211_band band; 2129 enum nl80211_band band;
2130 struct iwl_rate_scale_data *window; 2130 struct iwl_rate_scale_data *window;
2131 struct rs_rate *rate = &tbl->rate; 2131 struct rs_rate *rate = &tbl->rate;
2132 enum tpc_action action; 2132 enum tpc_action action;
@@ -2148,7 +2148,7 @@ static bool rs_tpc_perform(struct iwl_mvm *mvm,
2148 rcu_read_lock(); 2148 rcu_read_lock();
2149 chanctx_conf = rcu_dereference(vif->chanctx_conf); 2149 chanctx_conf = rcu_dereference(vif->chanctx_conf);
2150 if (WARN_ON(!chanctx_conf)) 2150 if (WARN_ON(!chanctx_conf))
2151 band = IEEE80211_NUM_BANDS; 2151 band = NUM_NL80211_BANDS;
2152 else 2152 else
2153 band = chanctx_conf->def.chan->band; 2153 band = chanctx_conf->def.chan->band;
2154 rcu_read_unlock(); 2154 rcu_read_unlock();
@@ -2606,7 +2606,7 @@ static void rs_init_optimal_rate(struct iwl_mvm *mvm,
2606 rate->type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2; 2606 rate->type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2;
2607 else if (lq_sta->max_siso_rate_idx != IWL_RATE_INVALID) 2607 else if (lq_sta->max_siso_rate_idx != IWL_RATE_INVALID)
2608 rate->type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO; 2608 rate->type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO;
2609 else if (lq_sta->band == IEEE80211_BAND_5GHZ) 2609 else if (lq_sta->band == NL80211_BAND_5GHZ)
2610 rate->type = LQ_LEGACY_A; 2610 rate->type = LQ_LEGACY_A;
2611 else 2611 else
2612 rate->type = LQ_LEGACY_G; 2612 rate->type = LQ_LEGACY_G;
@@ -2623,7 +2623,7 @@ static void rs_init_optimal_rate(struct iwl_mvm *mvm,
2623 } else { 2623 } else {
2624 lq_sta->optimal_rate_mask = lq_sta->active_legacy_rate; 2624 lq_sta->optimal_rate_mask = lq_sta->active_legacy_rate;
2625 2625
2626 if (lq_sta->band == IEEE80211_BAND_5GHZ) { 2626 if (lq_sta->band == NL80211_BAND_5GHZ) {
2627 lq_sta->optimal_rates = rs_optimal_rates_5ghz_legacy; 2627 lq_sta->optimal_rates = rs_optimal_rates_5ghz_legacy;
2628 lq_sta->optimal_nentries = 2628 lq_sta->optimal_nentries =
2629 ARRAY_SIZE(rs_optimal_rates_5ghz_legacy); 2629 ARRAY_SIZE(rs_optimal_rates_5ghz_legacy);
@@ -2679,7 +2679,7 @@ static struct rs_rate *rs_get_optimal_rate(struct iwl_mvm *mvm,
2679static void rs_get_initial_rate(struct iwl_mvm *mvm, 2679static void rs_get_initial_rate(struct iwl_mvm *mvm,
2680 struct ieee80211_sta *sta, 2680 struct ieee80211_sta *sta,
2681 struct iwl_lq_sta *lq_sta, 2681 struct iwl_lq_sta *lq_sta,
2682 enum ieee80211_band band, 2682 enum nl80211_band band,
2683 struct rs_rate *rate) 2683 struct rs_rate *rate)
2684{ 2684{
2685 int i, nentries; 2685 int i, nentries;
@@ -2714,7 +2714,7 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm,
2714 rate->index = find_first_bit(&lq_sta->active_legacy_rate, 2714 rate->index = find_first_bit(&lq_sta->active_legacy_rate,
2715 BITS_PER_LONG); 2715 BITS_PER_LONG);
2716 2716
2717 if (band == IEEE80211_BAND_5GHZ) { 2717 if (band == NL80211_BAND_5GHZ) {
2718 rate->type = LQ_LEGACY_A; 2718 rate->type = LQ_LEGACY_A;
2719 initial_rates = rs_optimal_rates_5ghz_legacy; 2719 initial_rates = rs_optimal_rates_5ghz_legacy;
2720 nentries = ARRAY_SIZE(rs_optimal_rates_5ghz_legacy); 2720 nentries = ARRAY_SIZE(rs_optimal_rates_5ghz_legacy);
@@ -2814,7 +2814,7 @@ void rs_update_last_rssi(struct iwl_mvm *mvm,
2814static void rs_initialize_lq(struct iwl_mvm *mvm, 2814static void rs_initialize_lq(struct iwl_mvm *mvm,
2815 struct ieee80211_sta *sta, 2815 struct ieee80211_sta *sta,
2816 struct iwl_lq_sta *lq_sta, 2816 struct iwl_lq_sta *lq_sta,
2817 enum ieee80211_band band, 2817 enum nl80211_band band,
2818 bool init) 2818 bool init)
2819{ 2819{
2820 struct iwl_scale_tbl_info *tbl; 2820 struct iwl_scale_tbl_info *tbl;
@@ -3097,7 +3097,7 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg)
3097 * Called after adding a new station to initialize rate scaling 3097 * Called after adding a new station to initialize rate scaling
3098 */ 3098 */
3099void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 3099void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
3100 enum ieee80211_band band, bool init) 3100 enum nl80211_band band, bool init)
3101{ 3101{
3102 int i, j; 3102 int i, j;
3103 struct ieee80211_hw *hw = mvm->hw; 3103 struct ieee80211_hw *hw = mvm->hw;
@@ -3203,7 +3203,7 @@ static void rs_rate_update(void *mvm_r,
3203#ifdef CONFIG_MAC80211_DEBUGFS 3203#ifdef CONFIG_MAC80211_DEBUGFS
3204static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm, 3204static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm,
3205 struct iwl_lq_cmd *lq_cmd, 3205 struct iwl_lq_cmd *lq_cmd,
3206 enum ieee80211_band band, 3206 enum nl80211_band band,
3207 u32 ucode_rate) 3207 u32 ucode_rate)
3208{ 3208{
3209 struct rs_rate rate; 3209 struct rs_rate rate;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
index bdb6f2d8d854..90d046fb24a0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -305,7 +305,7 @@ struct iwl_lq_sta {
305 bool stbc_capable; /* Tx STBC is supported by chip and Rx by STA */ 305 bool stbc_capable; /* Tx STBC is supported by chip and Rx by STA */
306 bool bfer_capable; /* Remote supports beamformee and we BFer */ 306 bool bfer_capable; /* Remote supports beamformee and we BFer */
307 307
308 enum ieee80211_band band; 308 enum nl80211_band band;
309 309
310 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */ 310 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
311 unsigned long active_legacy_rate; 311 unsigned long active_legacy_rate;
@@ -358,7 +358,7 @@ struct iwl_lq_sta {
358 358
359/* Initialize station's rate scaling information after adding station */ 359/* Initialize station's rate scaling information after adding station */
360void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 360void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
361 enum ieee80211_band band, bool init); 361 enum nl80211_band band, bool init);
362 362
363/* Notify RS about Tx status */ 363/* Notify RS about Tx status */
364void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 364void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 485cfc1a4daa..263e8a8576b7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -131,7 +131,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
131 fraglen, rxb->truesize); 131 fraglen, rxb->truesize);
132 } 132 }
133 133
134 ieee80211_rx_napi(mvm->hw, skb, napi); 134 ieee80211_rx_napi(mvm->hw, NULL, skb, napi);
135} 135}
136 136
137/* 137/*
@@ -319,7 +319,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
319 rx_status->device_timestamp = le32_to_cpu(phy_info->system_timestamp); 319 rx_status->device_timestamp = le32_to_cpu(phy_info->system_timestamp);
320 rx_status->band = 320 rx_status->band =
321 (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ? 321 (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ?
322 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; 322 NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
323 rx_status->freq = 323 rx_status->freq =
324 ieee80211_channel_to_frequency(le16_to_cpu(phy_info->channel), 324 ieee80211_channel_to_frequency(le16_to_cpu(phy_info->channel),
325 rx_status->band); 325 rx_status->band);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index b2bc3d96a13f..651604d18a32 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -210,7 +210,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
210 if (iwl_mvm_check_pn(mvm, skb, queue, sta)) 210 if (iwl_mvm_check_pn(mvm, skb, queue, sta))
211 kfree_skb(skb); 211 kfree_skb(skb);
212 else 212 else
213 ieee80211_rx_napi(mvm->hw, skb, napi); 213 ieee80211_rx_napi(mvm->hw, NULL, skb, napi);
214} 214}
215 215
216static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, 216static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
@@ -456,8 +456,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
456 456
457 rx_status->mactime = le64_to_cpu(desc->tsf_on_air_rise); 457 rx_status->mactime = le64_to_cpu(desc->tsf_on_air_rise);
458 rx_status->device_timestamp = le32_to_cpu(desc->gp2_on_air_rise); 458 rx_status->device_timestamp = le32_to_cpu(desc->gp2_on_air_rise);
459 rx_status->band = desc->channel > 14 ? IEEE80211_BAND_5GHZ : 459 rx_status->band = desc->channel > 14 ? NL80211_BAND_5GHZ :
460 IEEE80211_BAND_2GHZ; 460 NL80211_BAND_2GHZ;
461 rx_status->freq = ieee80211_channel_to_frequency(desc->channel, 461 rx_status->freq = ieee80211_channel_to_frequency(desc->channel,
462 rx_status->band); 462 rx_status->band);
463 iwl_mvm_get_signal_strength(mvm, desc, rx_status); 463 iwl_mvm_get_signal_strength(mvm, desc, rx_status);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index c1d1be9c5d01..6f609dd5c222 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -163,16 +163,16 @@ static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
163 return cpu_to_le16(rx_chain); 163 return cpu_to_le16(rx_chain);
164} 164}
165 165
166static __le32 iwl_mvm_scan_rxon_flags(enum ieee80211_band band) 166static __le32 iwl_mvm_scan_rxon_flags(enum nl80211_band band)
167{ 167{
168 if (band == IEEE80211_BAND_2GHZ) 168 if (band == NL80211_BAND_2GHZ)
169 return cpu_to_le32(PHY_BAND_24); 169 return cpu_to_le32(PHY_BAND_24);
170 else 170 else
171 return cpu_to_le32(PHY_BAND_5); 171 return cpu_to_le32(PHY_BAND_5);
172} 172}
173 173
174static inline __le32 174static inline __le32
175iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band, 175iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum nl80211_band band,
176 bool no_cck) 176 bool no_cck)
177{ 177{
178 u32 tx_ant; 178 u32 tx_ant;
@@ -182,7 +182,7 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band,
182 mvm->scan_last_antenna_idx); 182 mvm->scan_last_antenna_idx);
183 tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS; 183 tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS;
184 184
185 if (band == IEEE80211_BAND_2GHZ && !no_cck) 185 if (band == NL80211_BAND_2GHZ && !no_cck)
186 return cpu_to_le32(IWL_RATE_1M_PLCP | RATE_MCS_CCK_MSK | 186 return cpu_to_le32(IWL_RATE_1M_PLCP | RATE_MCS_CCK_MSK |
187 tx_ant); 187 tx_ant);
188 else 188 else
@@ -591,14 +591,14 @@ static void iwl_mvm_scan_fill_tx_cmd(struct iwl_mvm *mvm,
591 tx_cmd[0].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL | 591 tx_cmd[0].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
592 TX_CMD_FLG_BT_DIS); 592 TX_CMD_FLG_BT_DIS);
593 tx_cmd[0].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm, 593 tx_cmd[0].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
594 IEEE80211_BAND_2GHZ, 594 NL80211_BAND_2GHZ,
595 no_cck); 595 no_cck);
596 tx_cmd[0].sta_id = mvm->aux_sta.sta_id; 596 tx_cmd[0].sta_id = mvm->aux_sta.sta_id;
597 597
598 tx_cmd[1].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL | 598 tx_cmd[1].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
599 TX_CMD_FLG_BT_DIS); 599 TX_CMD_FLG_BT_DIS);
600 tx_cmd[1].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm, 600 tx_cmd[1].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
601 IEEE80211_BAND_5GHZ, 601 NL80211_BAND_5GHZ,
602 no_cck); 602 no_cck);
603 tx_cmd[1].sta_id = mvm->aux_sta.sta_id; 603 tx_cmd[1].sta_id = mvm->aux_sta.sta_id;
604} 604}
@@ -695,19 +695,19 @@ iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
695 695
696 /* Insert ds parameter set element on 2.4 GHz band */ 696 /* Insert ds parameter set element on 2.4 GHz band */
697 newpos = iwl_mvm_copy_and_insert_ds_elem(mvm, 697 newpos = iwl_mvm_copy_and_insert_ds_elem(mvm,
698 ies->ies[IEEE80211_BAND_2GHZ], 698 ies->ies[NL80211_BAND_2GHZ],
699 ies->len[IEEE80211_BAND_2GHZ], 699 ies->len[NL80211_BAND_2GHZ],
700 pos); 700 pos);
701 params->preq.band_data[0].offset = cpu_to_le16(pos - params->preq.buf); 701 params->preq.band_data[0].offset = cpu_to_le16(pos - params->preq.buf);
702 params->preq.band_data[0].len = cpu_to_le16(newpos - pos); 702 params->preq.band_data[0].len = cpu_to_le16(newpos - pos);
703 pos = newpos; 703 pos = newpos;
704 704
705 memcpy(pos, ies->ies[IEEE80211_BAND_5GHZ], 705 memcpy(pos, ies->ies[NL80211_BAND_5GHZ],
706 ies->len[IEEE80211_BAND_5GHZ]); 706 ies->len[NL80211_BAND_5GHZ]);
707 params->preq.band_data[1].offset = cpu_to_le16(pos - params->preq.buf); 707 params->preq.band_data[1].offset = cpu_to_le16(pos - params->preq.buf);
708 params->preq.band_data[1].len = 708 params->preq.band_data[1].len =
709 cpu_to_le16(ies->len[IEEE80211_BAND_5GHZ]); 709 cpu_to_le16(ies->len[NL80211_BAND_5GHZ]);
710 pos += ies->len[IEEE80211_BAND_5GHZ]; 710 pos += ies->len[NL80211_BAND_5GHZ];
711 711
712 memcpy(pos, ies->common_ies, ies->common_ie_len); 712 memcpy(pos, ies->common_ies, ies->common_ie_len);
713 params->preq.common_data.offset = cpu_to_le16(pos - params->preq.buf); 713 params->preq.common_data.offset = cpu_to_le16(pos - params->preq.buf);
@@ -921,10 +921,10 @@ static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm)
921 unsigned int rates = 0; 921 unsigned int rates = 0;
922 int i; 922 int i;
923 923
924 band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ]; 924 band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
925 for (i = 0; i < band->n_bitrates; i++) 925 for (i = 0; i < band->n_bitrates; i++)
926 rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value); 926 rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
927 band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ]; 927 band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
928 for (i = 0; i < band->n_bitrates; i++) 928 for (i = 0; i < band->n_bitrates; i++)
929 rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value); 929 rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
930 930
@@ -939,8 +939,8 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
939 struct iwl_scan_config *scan_config; 939 struct iwl_scan_config *scan_config;
940 struct ieee80211_supported_band *band; 940 struct ieee80211_supported_band *band;
941 int num_channels = 941 int num_channels =
942 mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels + 942 mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels +
943 mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels; 943 mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels;
944 int ret, i, j = 0, cmd_size; 944 int ret, i, j = 0, cmd_size;
945 struct iwl_host_cmd cmd = { 945 struct iwl_host_cmd cmd = {
946 .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0), 946 .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
@@ -994,10 +994,10 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
994 IWL_CHANNEL_FLAG_EBS_ADD | 994 IWL_CHANNEL_FLAG_EBS_ADD |
995 IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE; 995 IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
996 996
997 band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ]; 997 band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
998 for (i = 0; i < band->n_channels; i++, j++) 998 for (i = 0; i < band->n_channels; i++, j++)
999 scan_config->channel_array[j] = band->channels[i].hw_value; 999 scan_config->channel_array[j] = band->channels[i].hw_value;
1000 band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ]; 1000 band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
1001 for (i = 0; i < band->n_channels; i++, j++) 1001 for (i = 0; i < band->n_channels; i++, j++)
1002 scan_config->channel_array[j] = band->channels[i].hw_value; 1002 scan_config->channel_array[j] = band->channels[i].hw_value;
1003 1003
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
index 18711c5de35a..9f160fc58cd0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
@@ -444,7 +444,7 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
444 } 444 }
445 445
446 if (chandef) { 446 if (chandef) {
447 cmd.ci.band = (chandef->chan->band == IEEE80211_BAND_2GHZ ? 447 cmd.ci.band = (chandef->chan->band == NL80211_BAND_2GHZ ?
448 PHY_BAND_24 : PHY_BAND_5); 448 PHY_BAND_24 : PHY_BAND_5);
449 cmd.ci.channel = chandef->chan->hw_value; 449 cmd.ci.channel = chandef->chan->hw_value;
450 cmd.ci.width = iwl_mvm_get_channel_width(chandef); 450 cmd.ci.width = iwl_mvm_get_channel_width(chandef);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index efb9b98c4c98..bd286fca3776 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -359,7 +359,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
359 &mvm->nvm_data->bands[info->band], sta); 359 &mvm->nvm_data->bands[info->band], sta);
360 360
361 /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ 361 /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
362 if (info->band == IEEE80211_BAND_5GHZ) 362 if (info->band == NL80211_BAND_5GHZ)
363 rate_idx += IWL_FIRST_OFDM_RATE; 363 rate_idx += IWL_FIRST_OFDM_RATE;
364 364
365 /* For 2.4 GHZ band, check that there is no need to remap */ 365 /* For 2.4 GHZ band, check that there is no need to remap */
@@ -372,7 +372,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
372 iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm), 372 iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
373 mvm->mgmt_last_antenna_idx); 373 mvm->mgmt_last_antenna_idx);
374 374
375 if (info->band == IEEE80211_BAND_2GHZ && 375 if (info->band == NL80211_BAND_2GHZ &&
376 !iwl_mvm_bt_coex_is_shared_ant_avail(mvm)) 376 !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
377 rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS; 377 rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
378 else 378 else
@@ -1052,7 +1052,7 @@ const char *iwl_mvm_get_tx_fail_reason(u32 status)
1052#endif /* CONFIG_IWLWIFI_DEBUG */ 1052#endif /* CONFIG_IWLWIFI_DEBUG */
1053 1053
1054void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags, 1054void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
1055 enum ieee80211_band band, 1055 enum nl80211_band band,
1056 struct ieee80211_tx_rate *r) 1056 struct ieee80211_tx_rate *r)
1057{ 1057{
1058 if (rate_n_flags & RATE_HT_MCS_GF_MSK) 1058 if (rate_n_flags & RATE_HT_MCS_GF_MSK)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 486c98541afc..f0ffd62f02d3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -217,14 +217,14 @@ static const u8 fw_rate_idx_to_plcp[IWL_RATE_COUNT] = {
217}; 217};
218 218
219int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags, 219int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
220 enum ieee80211_band band) 220 enum nl80211_band band)
221{ 221{
222 int rate = rate_n_flags & RATE_LEGACY_RATE_MSK; 222 int rate = rate_n_flags & RATE_LEGACY_RATE_MSK;
223 int idx; 223 int idx;
224 int band_offset = 0; 224 int band_offset = 0;
225 225
226 /* Legacy rate format, search for match in table */ 226 /* Legacy rate format, search for match in table */
227 if (band == IEEE80211_BAND_5GHZ) 227 if (band == NL80211_BAND_5GHZ)
228 band_offset = IWL_FIRST_OFDM_RATE; 228 band_offset = IWL_FIRST_OFDM_RATE;
229 for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++) 229 for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
230 if (fw_rate_idx_to_plcp[idx] == rate) 230 if (fw_rate_idx_to_plcp[idx] == rate)
diff --git a/drivers/net/wireless/intersil/orinoco/cfg.c b/drivers/net/wireless/intersil/orinoco/cfg.c
index 0f6ea316e38e..7aa47069af0a 100644
--- a/drivers/net/wireless/intersil/orinoco/cfg.c
+++ b/drivers/net/wireless/intersil/orinoco/cfg.c
@@ -60,14 +60,14 @@ int orinoco_wiphy_register(struct wiphy *wiphy)
60 if (priv->channel_mask & (1 << i)) { 60 if (priv->channel_mask & (1 << i)) {
61 priv->channels[i].center_freq = 61 priv->channels[i].center_freq =
62 ieee80211_channel_to_frequency(i + 1, 62 ieee80211_channel_to_frequency(i + 1,
63 IEEE80211_BAND_2GHZ); 63 NL80211_BAND_2GHZ);
64 channels++; 64 channels++;
65 } 65 }
66 } 66 }
67 priv->band.channels = priv->channels; 67 priv->band.channels = priv->channels;
68 priv->band.n_channels = channels; 68 priv->band.n_channels = channels;
69 69
70 wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band; 70 wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
71 wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; 71 wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
72 72
73 i = 0; 73 i = 0;
@@ -175,7 +175,7 @@ static int orinoco_set_monitor_channel(struct wiphy *wiphy,
175 if (cfg80211_get_chandef_type(chandef) != NL80211_CHAN_NO_HT) 175 if (cfg80211_get_chandef_type(chandef) != NL80211_CHAN_NO_HT)
176 return -EINVAL; 176 return -EINVAL;
177 177
178 if (chandef->chan->band != IEEE80211_BAND_2GHZ) 178 if (chandef->chan->band != NL80211_BAND_2GHZ)
179 return -EINVAL; 179 return -EINVAL;
180 180
181 channel = ieee80211_frequency_to_channel(chandef->chan->center_freq); 181 channel = ieee80211_frequency_to_channel(chandef->chan->center_freq);
diff --git a/drivers/net/wireless/intersil/orinoco/hw.c b/drivers/net/wireless/intersil/orinoco/hw.c
index e27e32851f1e..61af5a28f269 100644
--- a/drivers/net/wireless/intersil/orinoco/hw.c
+++ b/drivers/net/wireless/intersil/orinoco/hw.c
@@ -1193,7 +1193,7 @@ int orinoco_hw_get_freq(struct orinoco_private *priv)
1193 goto out; 1193 goto out;
1194 1194
1195 } 1195 }
1196 freq = ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ); 1196 freq = ieee80211_channel_to_frequency(channel, NL80211_BAND_2GHZ);
1197 1197
1198 out: 1198 out:
1199 orinoco_unlock(priv, &flags); 1199 orinoco_unlock(priv, &flags);
diff --git a/drivers/net/wireless/intersil/orinoco/scan.c b/drivers/net/wireless/intersil/orinoco/scan.c
index 2c66166add70..d0ceb06c72d0 100644
--- a/drivers/net/wireless/intersil/orinoco/scan.c
+++ b/drivers/net/wireless/intersil/orinoco/scan.c
@@ -111,7 +111,7 @@ static void orinoco_add_hostscan_result(struct orinoco_private *priv,
111 } 111 }
112 112
113 freq = ieee80211_channel_to_frequency( 113 freq = ieee80211_channel_to_frequency(
114 le16_to_cpu(bss->a.channel), IEEE80211_BAND_2GHZ); 114 le16_to_cpu(bss->a.channel), NL80211_BAND_2GHZ);
115 channel = ieee80211_get_channel(wiphy, freq); 115 channel = ieee80211_get_channel(wiphy, freq);
116 if (!channel) { 116 if (!channel) {
117 printk(KERN_DEBUG "Invalid channel designation %04X(%04X)", 117 printk(KERN_DEBUG "Invalid channel designation %04X(%04X)",
@@ -148,7 +148,7 @@ void orinoco_add_extscan_result(struct orinoco_private *priv,
148 ie_len = len - sizeof(*bss); 148 ie_len = len - sizeof(*bss);
149 ie = cfg80211_find_ie(WLAN_EID_DS_PARAMS, bss->data, ie_len); 149 ie = cfg80211_find_ie(WLAN_EID_DS_PARAMS, bss->data, ie_len);
150 chan = ie ? ie[2] : 0; 150 chan = ie ? ie[2] : 0;
151 freq = ieee80211_channel_to_frequency(chan, IEEE80211_BAND_2GHZ); 151 freq = ieee80211_channel_to_frequency(chan, NL80211_BAND_2GHZ);
152 channel = ieee80211_get_channel(wiphy, freq); 152 channel = ieee80211_get_channel(wiphy, freq);
153 153
154 timestamp = le64_to_cpu(bss->timestamp); 154 timestamp = le64_to_cpu(bss->timestamp);
diff --git a/drivers/net/wireless/intersil/p54/eeprom.c b/drivers/net/wireless/intersil/p54/eeprom.c
index 2fe713eda7ad..d4c73d39336f 100644
--- a/drivers/net/wireless/intersil/p54/eeprom.c
+++ b/drivers/net/wireless/intersil/p54/eeprom.c
@@ -76,14 +76,14 @@ struct p54_channel_entry {
76 u16 data; 76 u16 data;
77 int index; 77 int index;
78 int max_power; 78 int max_power;
79 enum ieee80211_band band; 79 enum nl80211_band band;
80}; 80};
81 81
82struct p54_channel_list { 82struct p54_channel_list {
83 struct p54_channel_entry *channels; 83 struct p54_channel_entry *channels;
84 size_t entries; 84 size_t entries;
85 size_t max_entries; 85 size_t max_entries;
86 size_t band_channel_num[IEEE80211_NUM_BANDS]; 86 size_t band_channel_num[NUM_NL80211_BANDS];
87}; 87};
88 88
89static int p54_get_band_from_freq(u16 freq) 89static int p54_get_band_from_freq(u16 freq)
@@ -91,10 +91,10 @@ static int p54_get_band_from_freq(u16 freq)
91 /* FIXME: sync these values with the 802.11 spec */ 91 /* FIXME: sync these values with the 802.11 spec */
92 92
93 if ((freq >= 2412) && (freq <= 2484)) 93 if ((freq >= 2412) && (freq <= 2484))
94 return IEEE80211_BAND_2GHZ; 94 return NL80211_BAND_2GHZ;
95 95
96 if ((freq >= 4920) && (freq <= 5825)) 96 if ((freq >= 4920) && (freq <= 5825))
97 return IEEE80211_BAND_5GHZ; 97 return NL80211_BAND_5GHZ;
98 98
99 return -1; 99 return -1;
100} 100}
@@ -124,16 +124,16 @@ static int p54_compare_rssichan(const void *_a,
124 124
125static int p54_fill_band_bitrates(struct ieee80211_hw *dev, 125static int p54_fill_band_bitrates(struct ieee80211_hw *dev,
126 struct ieee80211_supported_band *band_entry, 126 struct ieee80211_supported_band *band_entry,
127 enum ieee80211_band band) 127 enum nl80211_band band)
128{ 128{
129 /* TODO: generate rate array dynamically */ 129 /* TODO: generate rate array dynamically */
130 130
131 switch (band) { 131 switch (band) {
132 case IEEE80211_BAND_2GHZ: 132 case NL80211_BAND_2GHZ:
133 band_entry->bitrates = p54_bgrates; 133 band_entry->bitrates = p54_bgrates;
134 band_entry->n_bitrates = ARRAY_SIZE(p54_bgrates); 134 band_entry->n_bitrates = ARRAY_SIZE(p54_bgrates);
135 break; 135 break;
136 case IEEE80211_BAND_5GHZ: 136 case NL80211_BAND_5GHZ:
137 band_entry->bitrates = p54_arates; 137 band_entry->bitrates = p54_arates;
138 band_entry->n_bitrates = ARRAY_SIZE(p54_arates); 138 band_entry->n_bitrates = ARRAY_SIZE(p54_arates);
139 break; 139 break;
@@ -147,7 +147,7 @@ static int p54_fill_band_bitrates(struct ieee80211_hw *dev,
147static int p54_generate_band(struct ieee80211_hw *dev, 147static int p54_generate_band(struct ieee80211_hw *dev,
148 struct p54_channel_list *list, 148 struct p54_channel_list *list,
149 unsigned int *chan_num, 149 unsigned int *chan_num,
150 enum ieee80211_band band) 150 enum nl80211_band band)
151{ 151{
152 struct p54_common *priv = dev->priv; 152 struct p54_common *priv = dev->priv;
153 struct ieee80211_supported_band *tmp, *old; 153 struct ieee80211_supported_band *tmp, *old;
@@ -206,7 +206,7 @@ static int p54_generate_band(struct ieee80211_hw *dev,
206 206
207 if (j == 0) { 207 if (j == 0) {
208 wiphy_err(dev->wiphy, "Disabling totally damaged %d GHz band\n", 208 wiphy_err(dev->wiphy, "Disabling totally damaged %d GHz band\n",
209 (band == IEEE80211_BAND_2GHZ) ? 2 : 5); 209 (band == NL80211_BAND_2GHZ) ? 2 : 5);
210 210
211 ret = -ENODATA; 211 ret = -ENODATA;
212 goto err_out; 212 goto err_out;
@@ -396,7 +396,7 @@ static int p54_generate_channel_lists(struct ieee80211_hw *dev)
396 p54_compare_channels, NULL); 396 p54_compare_channels, NULL);
397 397
398 k = 0; 398 k = 0;
399 for (i = 0, j = 0; i < IEEE80211_NUM_BANDS; i++) { 399 for (i = 0, j = 0; i < NUM_NL80211_BANDS; i++) {
400 if (p54_generate_band(dev, list, &k, i) == 0) 400 if (p54_generate_band(dev, list, &k, i) == 0)
401 j++; 401 j++;
402 } 402 }
@@ -573,10 +573,10 @@ static int p54_parse_rssical(struct ieee80211_hw *dev,
573 for (i = 0; i < entries; i++) { 573 for (i = 0; i < entries; i++) {
574 u16 freq = 0; 574 u16 freq = 0;
575 switch (i) { 575 switch (i) {
576 case IEEE80211_BAND_2GHZ: 576 case NL80211_BAND_2GHZ:
577 freq = 2437; 577 freq = 2437;
578 break; 578 break;
579 case IEEE80211_BAND_5GHZ: 579 case NL80211_BAND_5GHZ:
580 freq = 5240; 580 freq = 5240;
581 break; 581 break;
582 } 582 }
@@ -902,11 +902,11 @@ good_eeprom:
902 if (priv->rxhw == PDR_SYNTH_FRONTEND_XBOW) 902 if (priv->rxhw == PDR_SYNTH_FRONTEND_XBOW)
903 p54_init_xbow_synth(priv); 903 p54_init_xbow_synth(priv);
904 if (!(synth & PDR_SYNTH_24_GHZ_DISABLED)) 904 if (!(synth & PDR_SYNTH_24_GHZ_DISABLED))
905 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = 905 dev->wiphy->bands[NL80211_BAND_2GHZ] =
906 priv->band_table[IEEE80211_BAND_2GHZ]; 906 priv->band_table[NL80211_BAND_2GHZ];
907 if (!(synth & PDR_SYNTH_5_GHZ_DISABLED)) 907 if (!(synth & PDR_SYNTH_5_GHZ_DISABLED))
908 dev->wiphy->bands[IEEE80211_BAND_5GHZ] = 908 dev->wiphy->bands[NL80211_BAND_5GHZ] =
909 priv->band_table[IEEE80211_BAND_5GHZ]; 909 priv->band_table[NL80211_BAND_5GHZ];
910 if ((synth & PDR_SYNTH_RX_DIV_MASK) == PDR_SYNTH_RX_DIV_SUPPORTED) 910 if ((synth & PDR_SYNTH_RX_DIV_MASK) == PDR_SYNTH_RX_DIV_SUPPORTED)
911 priv->rx_diversity_mask = 3; 911 priv->rx_diversity_mask = 3;
912 if ((synth & PDR_SYNTH_TX_DIV_MASK) == PDR_SYNTH_TX_DIV_SUPPORTED) 912 if ((synth & PDR_SYNTH_TX_DIV_MASK) == PDR_SYNTH_TX_DIV_SUPPORTED)
diff --git a/drivers/net/wireless/intersil/p54/main.c b/drivers/net/wireless/intersil/p54/main.c
index 7805864e76f9..d5a3bf91a03e 100644
--- a/drivers/net/wireless/intersil/p54/main.c
+++ b/drivers/net/wireless/intersil/p54/main.c
@@ -477,7 +477,7 @@ static void p54_bss_info_changed(struct ieee80211_hw *dev,
477 p54_set_edcf(priv); 477 p54_set_edcf(priv);
478 } 478 }
479 if (changed & BSS_CHANGED_BASIC_RATES) { 479 if (changed & BSS_CHANGED_BASIC_RATES) {
480 if (dev->conf.chandef.chan->band == IEEE80211_BAND_5GHZ) 480 if (dev->conf.chandef.chan->band == NL80211_BAND_5GHZ)
481 priv->basic_rate_mask = (info->basic_rates << 4); 481 priv->basic_rate_mask = (info->basic_rates << 4);
482 else 482 else
483 priv->basic_rate_mask = info->basic_rates; 483 priv->basic_rate_mask = info->basic_rates;
@@ -829,7 +829,7 @@ void p54_free_common(struct ieee80211_hw *dev)
829 struct p54_common *priv = dev->priv; 829 struct p54_common *priv = dev->priv;
830 unsigned int i; 830 unsigned int i;
831 831
832 for (i = 0; i < IEEE80211_NUM_BANDS; i++) 832 for (i = 0; i < NUM_NL80211_BANDS; i++)
833 kfree(priv->band_table[i]); 833 kfree(priv->band_table[i]);
834 834
835 kfree(priv->iq_autocal); 835 kfree(priv->iq_autocal);
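Note: p54 keeps one allocated ieee80211_supported_band per band, so its cleanup loop simply runs over NUM_NL80211_BANDS; kfree(NULL) is a no-op, so bands that were never generated need no special case. A sketch of that iteration pattern, with an illustrative helper name:

    #include <linux/slab.h>
    #include <net/cfg80211.h>

    /* Sketch: tear down a per-band table sized by NUM_NL80211_BANDS. */
    static void free_band_table(struct ieee80211_supported_band **tbl)
    {
    	int i;

    	for (i = 0; i < NUM_NL80211_BANDS; i++)
    		kfree(tbl[i]);
    }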
diff --git a/drivers/net/wireless/intersil/p54/p54.h b/drivers/net/wireless/intersil/p54/p54.h
index 40b401ed6845..529939e611cd 100644
--- a/drivers/net/wireless/intersil/p54/p54.h
+++ b/drivers/net/wireless/intersil/p54/p54.h
@@ -223,7 +223,7 @@ struct p54_common {
223 struct p54_cal_database *curve_data; 223 struct p54_cal_database *curve_data;
224 struct p54_cal_database *output_limit; 224 struct p54_cal_database *output_limit;
225 struct p54_cal_database *rssi_db; 225 struct p54_cal_database *rssi_db;
226 struct ieee80211_supported_band *band_table[IEEE80211_NUM_BANDS]; 226 struct ieee80211_supported_band *band_table[NUM_NL80211_BANDS];
227 227
228 /* BBP/MAC state */ 228 /* BBP/MAC state */
229 u8 mac_addr[ETH_ALEN]; 229 u8 mac_addr[ETH_ALEN];
diff --git a/drivers/net/wireless/intersil/p54/txrx.c b/drivers/net/wireless/intersil/p54/txrx.c
index 24e5ff9a9272..1af7da0b386e 100644
--- a/drivers/net/wireless/intersil/p54/txrx.c
+++ b/drivers/net/wireless/intersil/p54/txrx.c
@@ -353,7 +353,7 @@ static int p54_rx_data(struct p54_common *priv, struct sk_buff *skb)
353 rx_status->signal = p54_rssi_to_dbm(priv, hdr->rssi); 353 rx_status->signal = p54_rssi_to_dbm(priv, hdr->rssi);
354 if (hdr->rate & 0x10) 354 if (hdr->rate & 0x10)
355 rx_status->flag |= RX_FLAG_SHORTPRE; 355 rx_status->flag |= RX_FLAG_SHORTPRE;
356 if (priv->hw->conf.chandef.chan->band == IEEE80211_BAND_5GHZ) 356 if (priv->hw->conf.chandef.chan->band == NL80211_BAND_5GHZ)
357 rx_status->rate_idx = (rate < 4) ? 0 : rate - 4; 357 rx_status->rate_idx = (rate < 4) ? 0 : rate - 4;
358 else 358 else
359 rx_status->rate_idx = rate; 359 rx_status->rate_idx = rate;
@@ -867,7 +867,7 @@ void p54_tx_80211(struct ieee80211_hw *dev,
867 for (i = 0; i < nrates && ridx < 8; i++) { 867 for (i = 0; i < nrates && ridx < 8; i++) {
868 /* we register the rates in perfect order */ 868 /* we register the rates in perfect order */
869 rate = info->control.rates[i].idx; 869 rate = info->control.rates[i].idx;
870 if (info->band == IEEE80211_BAND_5GHZ) 870 if (info->band == NL80211_BAND_5GHZ)
871 rate += 4; 871 rate += 4;
872 872
873 /* store the count we actually calculated for TX status */ 873 /* store the count we actually calculated for TX status */
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index e85e0737771c..c757f14c4c00 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -255,14 +255,14 @@ static struct class *hwsim_class;
255static struct net_device *hwsim_mon; /* global monitor netdev */ 255static struct net_device *hwsim_mon; /* global monitor netdev */
256 256
257#define CHAN2G(_freq) { \ 257#define CHAN2G(_freq) { \
258 .band = IEEE80211_BAND_2GHZ, \ 258 .band = NL80211_BAND_2GHZ, \
259 .center_freq = (_freq), \ 259 .center_freq = (_freq), \
260 .hw_value = (_freq), \ 260 .hw_value = (_freq), \
261 .max_power = 20, \ 261 .max_power = 20, \
262} 262}
263 263
264#define CHAN5G(_freq) { \ 264#define CHAN5G(_freq) { \
265 .band = IEEE80211_BAND_5GHZ, \ 265 .band = NL80211_BAND_5GHZ, \
266 .center_freq = (_freq), \ 266 .center_freq = (_freq), \
267 .hw_value = (_freq), \ 267 .hw_value = (_freq), \
268 .max_power = 20, \ 268 .max_power = 20, \
@@ -479,7 +479,7 @@ struct mac80211_hwsim_data {
479 struct list_head list; 479 struct list_head list;
480 struct ieee80211_hw *hw; 480 struct ieee80211_hw *hw;
481 struct device *dev; 481 struct device *dev;
482 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; 482 struct ieee80211_supported_band bands[NUM_NL80211_BANDS];
483 struct ieee80211_channel channels_2ghz[ARRAY_SIZE(hwsim_channels_2ghz)]; 483 struct ieee80211_channel channels_2ghz[ARRAY_SIZE(hwsim_channels_2ghz)];
484 struct ieee80211_channel channels_5ghz[ARRAY_SIZE(hwsim_channels_5ghz)]; 484 struct ieee80211_channel channels_5ghz[ARRAY_SIZE(hwsim_channels_5ghz)];
485 struct ieee80211_rate rates[ARRAY_SIZE(hwsim_rates)]; 485 struct ieee80211_rate rates[ARRAY_SIZE(hwsim_rates)];
@@ -1909,6 +1909,7 @@ static void hw_scan_work(struct work_struct *work)
1909 /* send probes */ 1909 /* send probes */
1910 for (i = 0; i < req->n_ssids; i++) { 1910 for (i = 0; i < req->n_ssids; i++) {
1911 struct sk_buff *probe; 1911 struct sk_buff *probe;
1912 struct ieee80211_mgmt *mgmt;
1912 1913
1913 probe = ieee80211_probereq_get(hwsim->hw, 1914 probe = ieee80211_probereq_get(hwsim->hw,
1914 hwsim->scan_addr, 1915 hwsim->scan_addr,
@@ -1918,6 +1919,10 @@ static void hw_scan_work(struct work_struct *work)
1918 if (!probe) 1919 if (!probe)
1919 continue; 1920 continue;
1920 1921
1922 mgmt = (struct ieee80211_mgmt *) probe->data;
1923 memcpy(mgmt->da, req->bssid, ETH_ALEN);
1924 memcpy(mgmt->bssid, req->bssid, ETH_ALEN);
1925
1921 if (req->ie_len) 1926 if (req->ie_len)
1922 memcpy(skb_put(probe, req->ie_len), req->ie, 1927 memcpy(skb_put(probe, req->ie_len), req->ie,
1923 req->ie_len); 1928 req->ie_len);
@@ -2342,7 +2347,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
2342 u8 addr[ETH_ALEN]; 2347 u8 addr[ETH_ALEN];
2343 struct mac80211_hwsim_data *data; 2348 struct mac80211_hwsim_data *data;
2344 struct ieee80211_hw *hw; 2349 struct ieee80211_hw *hw;
2345 enum ieee80211_band band; 2350 enum nl80211_band band;
2346 const struct ieee80211_ops *ops = &mac80211_hwsim_ops; 2351 const struct ieee80211_ops *ops = &mac80211_hwsim_ops;
2347 int idx; 2352 int idx;
2348 2353
@@ -2471,16 +2476,16 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
2471 sizeof(hwsim_channels_5ghz)); 2476 sizeof(hwsim_channels_5ghz));
2472 memcpy(data->rates, hwsim_rates, sizeof(hwsim_rates)); 2477 memcpy(data->rates, hwsim_rates, sizeof(hwsim_rates));
2473 2478
2474 for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) { 2479 for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
2475 struct ieee80211_supported_band *sband = &data->bands[band]; 2480 struct ieee80211_supported_band *sband = &data->bands[band];
2476 switch (band) { 2481 switch (band) {
2477 case IEEE80211_BAND_2GHZ: 2482 case NL80211_BAND_2GHZ:
2478 sband->channels = data->channels_2ghz; 2483 sband->channels = data->channels_2ghz;
2479 sband->n_channels = ARRAY_SIZE(hwsim_channels_2ghz); 2484 sband->n_channels = ARRAY_SIZE(hwsim_channels_2ghz);
2480 sband->bitrates = data->rates; 2485 sband->bitrates = data->rates;
2481 sband->n_bitrates = ARRAY_SIZE(hwsim_rates); 2486 sband->n_bitrates = ARRAY_SIZE(hwsim_rates);
2482 break; 2487 break;
2483 case IEEE80211_BAND_5GHZ: 2488 case NL80211_BAND_5GHZ:
2484 sband->channels = data->channels_5ghz; 2489 sband->channels = data->channels_5ghz;
2485 sband->n_channels = ARRAY_SIZE(hwsim_channels_5ghz); 2490 sband->n_channels = ARRAY_SIZE(hwsim_channels_5ghz);
2486 sband->bitrates = data->rates + 4; 2491 sband->bitrates = data->rates + 4;
diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c
index 2eea76a340b7..776b44bfd93a 100644
--- a/drivers/net/wireless/marvell/libertas/cfg.c
+++ b/drivers/net/wireless/marvell/libertas/cfg.c
@@ -23,7 +23,7 @@
23 23
24 24
25#define CHAN2G(_channel, _freq, _flags) { \ 25#define CHAN2G(_channel, _freq, _flags) { \
26 .band = IEEE80211_BAND_2GHZ, \ 26 .band = NL80211_BAND_2GHZ, \
27 .center_freq = (_freq), \ 27 .center_freq = (_freq), \
28 .hw_value = (_channel), \ 28 .hw_value = (_channel), \
29 .flags = (_flags), \ 29 .flags = (_flags), \
@@ -639,7 +639,7 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
639 if (chan_no != -1) { 639 if (chan_no != -1) {
640 struct wiphy *wiphy = priv->wdev->wiphy; 640 struct wiphy *wiphy = priv->wdev->wiphy;
641 int freq = ieee80211_channel_to_frequency(chan_no, 641 int freq = ieee80211_channel_to_frequency(chan_no,
642 IEEE80211_BAND_2GHZ); 642 NL80211_BAND_2GHZ);
643 struct ieee80211_channel *channel = 643 struct ieee80211_channel *channel =
644 ieee80211_get_channel(wiphy, freq); 644 ieee80211_get_channel(wiphy, freq);
645 645
@@ -1266,7 +1266,7 @@ _new_connect_scan_req(struct wiphy *wiphy, struct cfg80211_connect_params *sme)
1266{ 1266{
1267 struct cfg80211_scan_request *creq = NULL; 1267 struct cfg80211_scan_request *creq = NULL;
1268 int i, n_channels = ieee80211_get_num_supported_channels(wiphy); 1268 int i, n_channels = ieee80211_get_num_supported_channels(wiphy);
1269 enum ieee80211_band band; 1269 enum nl80211_band band;
1270 1270
1271 creq = kzalloc(sizeof(*creq) + sizeof(struct cfg80211_ssid) + 1271 creq = kzalloc(sizeof(*creq) + sizeof(struct cfg80211_ssid) +
1272 n_channels * sizeof(void *), 1272 n_channels * sizeof(void *),
@@ -1281,7 +1281,7 @@ _new_connect_scan_req(struct wiphy *wiphy, struct cfg80211_connect_params *sme)
1281 1281
1282 /* Scan all available channels */ 1282 /* Scan all available channels */
1283 i = 0; 1283 i = 0;
1284 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 1284 for (band = 0; band < NUM_NL80211_BANDS; band++) {
1285 int j; 1285 int j;
1286 1286
1287 if (!wiphy->bands[band]) 1287 if (!wiphy->bands[band])
@@ -2200,7 +2200,7 @@ int lbs_cfg_register(struct lbs_private *priv)
2200 if (lbs_mesh_activated(priv)) 2200 if (lbs_mesh_activated(priv))
2201 wdev->wiphy->interface_modes |= BIT(NL80211_IFTYPE_MESH_POINT); 2201 wdev->wiphy->interface_modes |= BIT(NL80211_IFTYPE_MESH_POINT);
2202 2202
2203 wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &lbs_band_2ghz; 2203 wdev->wiphy->bands[NL80211_BAND_2GHZ] = &lbs_band_2ghz;
2204 2204
2205 /* 2205 /*
2206 * We could check priv->fwcapinfo && FW_CAPINFO_WPA, but I have 2206 * We could check priv->fwcapinfo && FW_CAPINFO_WPA, but I have
diff --git a/drivers/net/wireless/marvell/libertas/cmd.c b/drivers/net/wireless/marvell/libertas/cmd.c
index 4ddd0e5a6b85..301170cccfff 100644
--- a/drivers/net/wireless/marvell/libertas/cmd.c
+++ b/drivers/net/wireless/marvell/libertas/cmd.c
@@ -743,7 +743,7 @@ int lbs_set_11d_domain_info(struct lbs_private *priv)
743 struct cmd_ds_802_11d_domain_info cmd; 743 struct cmd_ds_802_11d_domain_info cmd;
744 struct mrvl_ie_domain_param_set *domain = &cmd.domain; 744 struct mrvl_ie_domain_param_set *domain = &cmd.domain;
745 struct ieee80211_country_ie_triplet *t; 745 struct ieee80211_country_ie_triplet *t;
746 enum ieee80211_band band; 746 enum nl80211_band band;
747 struct ieee80211_channel *ch; 747 struct ieee80211_channel *ch;
748 u8 num_triplet = 0; 748 u8 num_triplet = 0;
749 u8 num_parsed_chan = 0; 749 u8 num_parsed_chan = 0;
@@ -777,7 +777,7 @@ int lbs_set_11d_domain_info(struct lbs_private *priv)
777 * etc. 777 * etc.
778 */ 778 */
779 for (band = 0; 779 for (band = 0;
780 (band < IEEE80211_NUM_BANDS) && (num_triplet < MAX_11D_TRIPLETS); 780 (band < NUM_NL80211_BANDS) && (num_triplet < MAX_11D_TRIPLETS);
781 band++) { 781 band++) {
782 782
783 if (!bands[band]) 783 if (!bands[band])
diff --git a/drivers/net/wireless/marvell/libertas_tf/main.c b/drivers/net/wireless/marvell/libertas_tf/main.c
index a47f0acc099a..0bf8916a02cf 100644
--- a/drivers/net/wireless/marvell/libertas_tf/main.c
+++ b/drivers/net/wireless/marvell/libertas_tf/main.c
@@ -570,7 +570,7 @@ int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb)
570 if (!(prxpd->status & cpu_to_le16(MRVDRV_RXPD_STATUS_OK))) 570 if (!(prxpd->status & cpu_to_le16(MRVDRV_RXPD_STATUS_OK)))
571 stats.flag |= RX_FLAG_FAILED_FCS_CRC; 571 stats.flag |= RX_FLAG_FAILED_FCS_CRC;
572 stats.freq = priv->cur_freq; 572 stats.freq = priv->cur_freq;
573 stats.band = IEEE80211_BAND_2GHZ; 573 stats.band = NL80211_BAND_2GHZ;
574 stats.signal = prxpd->snr; 574 stats.signal = prxpd->snr;
575 priv->noise = prxpd->nf; 575 priv->noise = prxpd->nf;
576 /* Marvell rate index has a hole at value 4 */ 576 /* Marvell rate index has a hole at value 4 */
@@ -642,7 +642,7 @@ struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev)
642 priv->band.bitrates = priv->rates; 642 priv->band.bitrates = priv->rates;
643 priv->band.n_channels = ARRAY_SIZE(lbtf_channels); 643 priv->band.n_channels = ARRAY_SIZE(lbtf_channels);
644 priv->band.channels = priv->channels; 644 priv->band.channels = priv->channels;
645 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band; 645 hw->wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
646 hw->wiphy->interface_modes = 646 hw->wiphy->interface_modes =
647 BIT(NL80211_IFTYPE_STATION) | 647 BIT(NL80211_IFTYPE_STATION) |
648 BIT(NL80211_IFTYPE_ADHOC); 648 BIT(NL80211_IFTYPE_ADHOC);
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 09578c6cde59..a74cc43b1953 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -59,7 +59,10 @@ static int mwifiex_11n_dispatch_amsdu_pkt(struct mwifiex_private *priv,
59 skb->len); 59 skb->len);
60 } 60 }
61 61
62 ret = mwifiex_recv_packet(priv, rx_skb); 62 if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
63 ret = mwifiex_uap_recv_packet(priv, rx_skb);
64 else
65 ret = mwifiex_recv_packet(priv, rx_skb);
63 if (ret == -1) 66 if (ret == -1)
64 mwifiex_dbg(priv->adapter, ERROR, 67 mwifiex_dbg(priv->adapter, ERROR,
65 "Rx of A-MSDU failed"); 68 "Rx of A-MSDU failed");
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index bb7235e1b9d1..6db202fa7157 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -474,7 +474,7 @@ int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
474 u8 no_of_parsed_chan = 0; 474 u8 no_of_parsed_chan = 0;
475 u8 first_chan = 0, next_chan = 0, max_pwr = 0; 475 u8 first_chan = 0, next_chan = 0, max_pwr = 0;
476 u8 i, flag = 0; 476 u8 i, flag = 0;
477 enum ieee80211_band band; 477 enum nl80211_band band;
478 struct ieee80211_supported_band *sband; 478 struct ieee80211_supported_band *sband;
479 struct ieee80211_channel *ch; 479 struct ieee80211_channel *ch;
480 struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy); 480 struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
@@ -1410,7 +1410,7 @@ mwifiex_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *dev,
1410{ 1410{
1411 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); 1411 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
1412 struct mwifiex_chan_stats *pchan_stats = priv->adapter->chan_stats; 1412 struct mwifiex_chan_stats *pchan_stats = priv->adapter->chan_stats;
1413 enum ieee80211_band band; 1413 enum nl80211_band band;
1414 1414
1415 mwifiex_dbg(priv->adapter, DUMP, "dump_survey idx=%d\n", idx); 1415 mwifiex_dbg(priv->adapter, DUMP, "dump_survey idx=%d\n", idx);
1416 1416
@@ -1586,7 +1586,7 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
1586{ 1586{
1587 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); 1587 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
1588 u16 bitmap_rates[MAX_BITMAP_RATES_SIZE]; 1588 u16 bitmap_rates[MAX_BITMAP_RATES_SIZE];
1589 enum ieee80211_band band; 1589 enum nl80211_band band;
1590 struct mwifiex_adapter *adapter = priv->adapter; 1590 struct mwifiex_adapter *adapter = priv->adapter;
1591 1591
1592 if (!priv->media_connected) { 1592 if (!priv->media_connected) {
@@ -1600,11 +1600,11 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
1600 memset(bitmap_rates, 0, sizeof(bitmap_rates)); 1600 memset(bitmap_rates, 0, sizeof(bitmap_rates));
1601 1601
1602 /* Fill HR/DSSS rates. */ 1602 /* Fill HR/DSSS rates. */
1603 if (band == IEEE80211_BAND_2GHZ) 1603 if (band == NL80211_BAND_2GHZ)
1604 bitmap_rates[0] = mask->control[band].legacy & 0x000f; 1604 bitmap_rates[0] = mask->control[band].legacy & 0x000f;
1605 1605
1606 /* Fill OFDM rates */ 1606 /* Fill OFDM rates */
1607 if (band == IEEE80211_BAND_2GHZ) 1607 if (band == NL80211_BAND_2GHZ)
1608 bitmap_rates[1] = (mask->control[band].legacy & 0x0ff0) >> 4; 1608 bitmap_rates[1] = (mask->control[band].legacy & 0x0ff0) >> 4;
1609 else 1609 else
1610 bitmap_rates[1] = mask->control[band].legacy; 1610 bitmap_rates[1] = mask->control[band].legacy;
@@ -1771,7 +1771,7 @@ mwifiex_cfg80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant)
1771 } else { 1771 } else {
1772 struct ieee80211_sta_ht_cap *ht_info; 1772 struct ieee80211_sta_ht_cap *ht_info;
1773 int rx_mcs_supp; 1773 int rx_mcs_supp;
1774 enum ieee80211_band band; 1774 enum nl80211_band band;
1775 1775
1776 if ((tx_ant == 0x1 && rx_ant == 0x1)) { 1776 if ((tx_ant == 0x1 && rx_ant == 0x1)) {
1777 adapter->user_dev_mcs_support = HT_STREAM_1X1; 1777 adapter->user_dev_mcs_support = HT_STREAM_1X1;
@@ -1785,7 +1785,7 @@ mwifiex_cfg80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant)
1785 MWIFIEX_11AC_MCS_MAP_2X2; 1785 MWIFIEX_11AC_MCS_MAP_2X2;
1786 } 1786 }
1787 1787
1788 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 1788 for (band = 0; band < NUM_NL80211_BANDS; band++) {
1789 if (!adapter->wiphy->bands[band]) 1789 if (!adapter->wiphy->bands[band])
1790 continue; 1790 continue;
1791 1791
@@ -1997,7 +1997,7 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv)
1997 struct cfg80211_bss *bss; 1997 struct cfg80211_bss *bss;
1998 int ie_len; 1998 int ie_len;
1999 u8 ie_buf[IEEE80211_MAX_SSID_LEN + sizeof(struct ieee_types_header)]; 1999 u8 ie_buf[IEEE80211_MAX_SSID_LEN + sizeof(struct ieee_types_header)];
2000 enum ieee80211_band band; 2000 enum nl80211_band band;
2001 2001
2002 if (mwifiex_get_bss_info(priv, &bss_info)) 2002 if (mwifiex_get_bss_info(priv, &bss_info))
2003 return -1; 2003 return -1;
@@ -2271,7 +2271,7 @@ static int mwifiex_set_ibss_params(struct mwifiex_private *priv,
2271 int index = 0, i; 2271 int index = 0, i;
2272 u8 config_bands = 0; 2272 u8 config_bands = 0;
2273 2273
2274 if (params->chandef.chan->band == IEEE80211_BAND_2GHZ) { 2274 if (params->chandef.chan->band == NL80211_BAND_2GHZ) {
2275 if (!params->basic_rates) { 2275 if (!params->basic_rates) {
2276 config_bands = BAND_B | BAND_G; 2276 config_bands = BAND_B | BAND_G;
2277 } else { 2277 } else {
@@ -2859,18 +2859,18 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
2859 mwifiex_init_priv_params(priv, dev); 2859 mwifiex_init_priv_params(priv, dev);
2860 priv->netdev = dev; 2860 priv->netdev = dev;
2861 2861
2862 mwifiex_setup_ht_caps(&wiphy->bands[IEEE80211_BAND_2GHZ]->ht_cap, priv); 2862 mwifiex_setup_ht_caps(&wiphy->bands[NL80211_BAND_2GHZ]->ht_cap, priv);
2863 if (adapter->is_hw_11ac_capable) 2863 if (adapter->is_hw_11ac_capable)
2864 mwifiex_setup_vht_caps( 2864 mwifiex_setup_vht_caps(
2865 &wiphy->bands[IEEE80211_BAND_2GHZ]->vht_cap, priv); 2865 &wiphy->bands[NL80211_BAND_2GHZ]->vht_cap, priv);
2866 2866
2867 if (adapter->config_bands & BAND_A) 2867 if (adapter->config_bands & BAND_A)
2868 mwifiex_setup_ht_caps( 2868 mwifiex_setup_ht_caps(
2869 &wiphy->bands[IEEE80211_BAND_5GHZ]->ht_cap, priv); 2869 &wiphy->bands[NL80211_BAND_5GHZ]->ht_cap, priv);
2870 2870
2871 if ((adapter->config_bands & BAND_A) && adapter->is_hw_11ac_capable) 2871 if ((adapter->config_bands & BAND_A) && adapter->is_hw_11ac_capable)
2872 mwifiex_setup_vht_caps( 2872 mwifiex_setup_vht_caps(
2873 &wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap, priv); 2873 &wiphy->bands[NL80211_BAND_5GHZ]->vht_cap, priv);
2874 2874
2875 dev_net_set(dev, wiphy_net(wiphy)); 2875 dev_net_set(dev, wiphy_net(wiphy));
2876 dev->ieee80211_ptr = &priv->wdev; 2876 dev->ieee80211_ptr = &priv->wdev;
@@ -3272,8 +3272,11 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
3272 3272
3273 for (i = 0; i < adapter->priv_num; i++) { 3273 for (i = 0; i < adapter->priv_num; i++) {
3274 priv = adapter->priv[i]; 3274 priv = adapter->priv[i];
3275 if (priv && priv->netdev) 3275 if (priv && priv->netdev) {
3276 mwifiex_stop_net_dev_queue(priv->netdev, adapter); 3276 mwifiex_stop_net_dev_queue(priv->netdev, adapter);
3277 if (netif_carrier_ok(priv->netdev))
3278 netif_carrier_off(priv->netdev);
3279 }
3277 } 3280 }
3278 3281
3279 for (i = 0; i < retry_num; i++) { 3282 for (i = 0; i < retry_num; i++) {
@@ -3344,8 +3347,11 @@ static int mwifiex_cfg80211_resume(struct wiphy *wiphy)
3344 3347
3345 for (i = 0; i < adapter->priv_num; i++) { 3348 for (i = 0; i < adapter->priv_num; i++) {
3346 priv = adapter->priv[i]; 3349 priv = adapter->priv[i];
3347 if (priv && priv->netdev) 3350 if (priv && priv->netdev) {
3351 if (!netif_carrier_ok(priv->netdev))
3352 netif_carrier_on(priv->netdev);
3348 mwifiex_wake_up_net_dev_queue(priv->netdev, adapter); 3353 mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
3354 }
3349 } 3355 }
3350 3356
3351 priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA); 3357 priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
@@ -3384,6 +3390,10 @@ static int mwifiex_cfg80211_resume(struct wiphy *wiphy)
3384 break; 3390 break;
3385 case MANAGEMENT_FRAME_MATCHED: 3391 case MANAGEMENT_FRAME_MATCHED:
3386 break; 3392 break;
3393 case GTK_REKEY_FAILURE:
3394 if (wiphy->wowlan_config->gtk_rekey_failure)
3395 wakeup_report.gtk_rekey_failure = true;
3396 break;
3387 default: 3397 default:
3388 break; 3398 break;
3389 } 3399 }
@@ -3410,6 +3420,16 @@ static void mwifiex_cfg80211_set_wakeup(struct wiphy *wiphy,
3410 3420
3411 device_set_wakeup_enable(adapter->dev, enabled); 3421 device_set_wakeup_enable(adapter->dev, enabled);
3412} 3422}
3423
3424static int mwifiex_set_rekey_data(struct wiphy *wiphy, struct net_device *dev,
3425 struct cfg80211_gtk_rekey_data *data)
3426{
3427 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
3428
3429 return mwifiex_send_cmd(priv, HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG,
3430 HostCmd_ACT_GEN_SET, 0, data, true);
3431}
3432
3413#endif 3433#endif
3414 3434
3415static int mwifiex_get_coalesce_pkt_type(u8 *byte_seq) 3435static int mwifiex_get_coalesce_pkt_type(u8 *byte_seq)
@@ -3801,7 +3821,7 @@ static int mwifiex_cfg80211_get_channel(struct wiphy *wiphy,
3801 struct ieee80211_channel *chan; 3821 struct ieee80211_channel *chan;
3802 u8 second_chan_offset; 3822 u8 second_chan_offset;
3803 enum nl80211_channel_type chan_type; 3823 enum nl80211_channel_type chan_type;
3804 enum ieee80211_band band; 3824 enum nl80211_band band;
3805 int freq; 3825 int freq;
3806 int ret = -ENODATA; 3826 int ret = -ENODATA;
3807 3827
@@ -3932,6 +3952,7 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
3932 .suspend = mwifiex_cfg80211_suspend, 3952 .suspend = mwifiex_cfg80211_suspend,
3933 .resume = mwifiex_cfg80211_resume, 3953 .resume = mwifiex_cfg80211_resume,
3934 .set_wakeup = mwifiex_cfg80211_set_wakeup, 3954 .set_wakeup = mwifiex_cfg80211_set_wakeup,
3955 .set_rekey_data = mwifiex_set_rekey_data,
3935#endif 3956#endif
3936 .set_coalesce = mwifiex_cfg80211_set_coalesce, 3957 .set_coalesce = mwifiex_cfg80211_set_coalesce,
3937 .tdls_mgmt = mwifiex_cfg80211_tdls_mgmt, 3958 .tdls_mgmt = mwifiex_cfg80211_tdls_mgmt,
@@ -3948,7 +3969,8 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
3948#ifdef CONFIG_PM 3969#ifdef CONFIG_PM
3949static const struct wiphy_wowlan_support mwifiex_wowlan_support = { 3970static const struct wiphy_wowlan_support mwifiex_wowlan_support = {
3950 .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT | 3971 .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT |
3951 WIPHY_WOWLAN_NET_DETECT, 3972 WIPHY_WOWLAN_NET_DETECT | WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
3973 WIPHY_WOWLAN_GTK_REKEY_FAILURE,
3952 .n_patterns = MWIFIEX_MEF_MAX_FILTERS, 3974 .n_patterns = MWIFIEX_MEF_MAX_FILTERS,
3953 .pattern_min_len = 1, 3975 .pattern_min_len = 1,
3954 .pattern_max_len = MWIFIEX_MAX_PATTERN_LEN, 3976 .pattern_max_len = MWIFIEX_MAX_PATTERN_LEN,
@@ -4031,11 +4053,11 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
4031 BIT(NL80211_IFTYPE_P2P_GO) | 4053 BIT(NL80211_IFTYPE_P2P_GO) |
4032 BIT(NL80211_IFTYPE_AP); 4054 BIT(NL80211_IFTYPE_AP);
4033 4055
4034 wiphy->bands[IEEE80211_BAND_2GHZ] = &mwifiex_band_2ghz; 4056 wiphy->bands[NL80211_BAND_2GHZ] = &mwifiex_band_2ghz;
4035 if (adapter->config_bands & BAND_A) 4057 if (adapter->config_bands & BAND_A)
4036 wiphy->bands[IEEE80211_BAND_5GHZ] = &mwifiex_band_5ghz; 4058 wiphy->bands[NL80211_BAND_5GHZ] = &mwifiex_band_5ghz;
4037 else 4059 else
4038 wiphy->bands[IEEE80211_BAND_5GHZ] = NULL; 4060 wiphy->bands[NL80211_BAND_5GHZ] = NULL;
4039 4061
4040 if (adapter->drcs_enabled && ISSUPP_DRCS_ENABLED(adapter->fw_cap_info)) 4062 if (adapter->drcs_enabled && ISSUPP_DRCS_ENABLED(adapter->fw_cap_info))
4041 wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta_drcs; 4063 wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta_drcs;
@@ -4086,6 +4108,7 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
4086 4108
4087 wiphy->features |= NL80211_FEATURE_HT_IBSS | 4109 wiphy->features |= NL80211_FEATURE_HT_IBSS |
4088 NL80211_FEATURE_INACTIVITY_TIMER | 4110 NL80211_FEATURE_INACTIVITY_TIMER |
4111 NL80211_FEATURE_LOW_PRIORITY_SCAN |
4089 NL80211_FEATURE_NEED_OBSS_SCAN; 4112 NL80211_FEATURE_NEED_OBSS_SCAN;
4090 4113
4091 if (ISSUPP_TDLS_ENABLED(adapter->fw_cap_info)) 4114 if (ISSUPP_TDLS_ENABLED(adapter->fw_cap_info))
diff --git a/drivers/net/wireless/marvell/mwifiex/cfp.c b/drivers/net/wireless/marvell/mwifiex/cfp.c
index 09fae27140f7..1ff22055e54f 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfp.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfp.c
@@ -322,9 +322,9 @@ mwifiex_get_cfp(struct mwifiex_private *priv, u8 band, u16 channel, u32 freq)
322 return cfp; 322 return cfp;
323 323
324 if (mwifiex_band_to_radio_type(band) == HostCmd_SCAN_RADIO_TYPE_BG) 324 if (mwifiex_band_to_radio_type(band) == HostCmd_SCAN_RADIO_TYPE_BG)
325 sband = priv->wdev.wiphy->bands[IEEE80211_BAND_2GHZ]; 325 sband = priv->wdev.wiphy->bands[NL80211_BAND_2GHZ];
326 else 326 else
327 sband = priv->wdev.wiphy->bands[IEEE80211_BAND_5GHZ]; 327 sband = priv->wdev.wiphy->bands[NL80211_BAND_5GHZ];
328 328
329 if (!sband) { 329 if (!sband) {
330 mwifiex_dbg(priv->adapter, ERROR, 330 mwifiex_dbg(priv->adapter, ERROR,
@@ -399,15 +399,15 @@ u32 mwifiex_get_rates_from_cfg80211(struct mwifiex_private *priv,
399 int i; 399 int i;
400 400
401 if (radio_type) { 401 if (radio_type) {
402 sband = wiphy->bands[IEEE80211_BAND_5GHZ]; 402 sband = wiphy->bands[NL80211_BAND_5GHZ];
403 if (WARN_ON_ONCE(!sband)) 403 if (WARN_ON_ONCE(!sband))
404 return 0; 404 return 0;
405 rate_mask = request->rates[IEEE80211_BAND_5GHZ]; 405 rate_mask = request->rates[NL80211_BAND_5GHZ];
406 } else { 406 } else {
407 sband = wiphy->bands[IEEE80211_BAND_2GHZ]; 407 sband = wiphy->bands[NL80211_BAND_2GHZ];
408 if (WARN_ON_ONCE(!sband)) 408 if (WARN_ON_ONCE(!sband))
409 return 0; 409 return 0;
410 rate_mask = request->rates[IEEE80211_BAND_2GHZ]; 410 rate_mask = request->rates[NL80211_BAND_2GHZ];
411 } 411 }
412 412
413 num_rates = 0; 413 num_rates = 0;
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index c134cf865291..8e4145abdbfa 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -372,6 +372,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
372#define HostCmd_CMD_COALESCE_CFG 0x010a 372#define HostCmd_CMD_COALESCE_CFG 0x010a
373#define HostCmd_CMD_MGMT_FRAME_REG 0x010c 373#define HostCmd_CMD_MGMT_FRAME_REG 0x010c
374#define HostCmd_CMD_REMAIN_ON_CHAN 0x010d 374#define HostCmd_CMD_REMAIN_ON_CHAN 0x010d
375#define HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG 0x010f
375#define HostCmd_CMD_11AC_CFG 0x0112 376#define HostCmd_CMD_11AC_CFG 0x0112
376#define HostCmd_CMD_HS_WAKEUP_REASON 0x0116 377#define HostCmd_CMD_HS_WAKEUP_REASON 0x0116
377#define HostCmd_CMD_TDLS_CONFIG 0x0100 378#define HostCmd_CMD_TDLS_CONFIG 0x0100
@@ -619,6 +620,7 @@ enum HS_WAKEUP_REASON {
619 MAGIC_PATTERN_MATCHED, 620 MAGIC_PATTERN_MATCHED,
620 CONTROL_FRAME_MATCHED, 621 CONTROL_FRAME_MATCHED,
621 MANAGEMENT_FRAME_MATCHED, 622 MANAGEMENT_FRAME_MATCHED,
623 GTK_REKEY_FAILURE,
622 RESERVED 624 RESERVED
623}; 625};
624 626
@@ -2183,6 +2185,14 @@ struct host_cmd_ds_wakeup_reason {
2183 u16 wakeup_reason; 2185 u16 wakeup_reason;
2184} __packed; 2186} __packed;
2185 2187
2188struct host_cmd_ds_gtk_rekey_params {
2189 __le16 action;
2190 u8 kck[NL80211_KCK_LEN];
2191 u8 kek[NL80211_KEK_LEN];
2192 __le32 replay_ctr_low;
2193 __le32 replay_ctr_high;
2194} __packed;
2195
2186struct host_cmd_ds_command { 2196struct host_cmd_ds_command {
2187 __le16 command; 2197 __le16 command;
2188 __le16 size; 2198 __le16 size;
@@ -2256,6 +2266,7 @@ struct host_cmd_ds_command {
2256 struct host_cmd_ds_multi_chan_policy mc_policy; 2266 struct host_cmd_ds_multi_chan_policy mc_policy;
2257 struct host_cmd_ds_robust_coex coex; 2267 struct host_cmd_ds_robust_coex coex;
2258 struct host_cmd_ds_wakeup_reason hs_wakeup_reason; 2268 struct host_cmd_ds_wakeup_reason hs_wakeup_reason;
2269 struct host_cmd_ds_gtk_rekey_params rekey;
2259 } params; 2270 } params;
2260} __packed; 2271} __packed;
2261 2272
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index 3cfa94677a8e..04b975cbb330 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -1074,12 +1074,14 @@ void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter)
1074 priv->netdev->name, priv->num_tx_timeout); 1074 priv->netdev->name, priv->num_tx_timeout);
1075 } 1075 }
1076 1076
1077 if (adapter->iface_type == MWIFIEX_SDIO) { 1077 if (adapter->iface_type == MWIFIEX_SDIO ||
1078 p += sprintf(p, "\n=== SDIO register dump===\n"); 1078 adapter->iface_type == MWIFIEX_PCIE) {
1079 p += sprintf(p, "\n=== %s register dump===\n",
1080 adapter->iface_type == MWIFIEX_SDIO ?
1081 "SDIO" : "PCIE");
1079 if (adapter->if_ops.reg_dump) 1082 if (adapter->if_ops.reg_dump)
1080 p += adapter->if_ops.reg_dump(adapter, p); 1083 p += adapter->if_ops.reg_dump(adapter, p);
1081 } 1084 }
1082
1083 p += sprintf(p, "\n=== more debug information\n"); 1085 p += sprintf(p, "\n=== more debug information\n");
1084 debug_info = kzalloc(sizeof(*debug_info), GFP_KERNEL); 1086 debug_info = kzalloc(sizeof(*debug_info), GFP_KERNEL);
1085 if (debug_info) { 1087 if (debug_info) {
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index aafc4ab4e5ae..a159fbef20cd 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -1019,6 +1019,8 @@ int mwifiex_shutdown_fw_complete(struct mwifiex_adapter *adapter);
1019int mwifiex_dnld_fw(struct mwifiex_adapter *, struct mwifiex_fw_image *); 1019int mwifiex_dnld_fw(struct mwifiex_adapter *, struct mwifiex_fw_image *);
1020 1020
1021int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb); 1021int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb);
1022int mwifiex_uap_recv_packet(struct mwifiex_private *priv,
1023 struct sk_buff *skb);
1022 1024
1023int mwifiex_process_mgmt_packet(struct mwifiex_private *priv, 1025int mwifiex_process_mgmt_packet(struct mwifiex_private *priv,
1024 struct sk_buff *skb); 1026 struct sk_buff *skb);
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index de364381fe7b..edf8b070f665 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -190,7 +190,6 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev,
190 190
191 if (ent->driver_data) { 191 if (ent->driver_data) {
192 struct mwifiex_pcie_device *data = (void *)ent->driver_data; 192 struct mwifiex_pcie_device *data = (void *)ent->driver_data;
193 card->pcie.firmware = data->firmware;
194 card->pcie.reg = data->reg; 193 card->pcie.reg = data->reg;
195 card->pcie.blksz_fw_dl = data->blksz_fw_dl; 194 card->pcie.blksz_fw_dl = data->blksz_fw_dl;
196 card->pcie.tx_buf_size = data->tx_buf_size; 195 card->pcie.tx_buf_size = data->tx_buf_size;
@@ -269,6 +268,11 @@ static const struct pci_device_id mwifiex_ids[] = {
269 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 268 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
270 .driver_data = (unsigned long)&mwifiex_pcie8997, 269 .driver_data = (unsigned long)&mwifiex_pcie8997,
271 }, 270 },
271 {
272 PCIE_VENDOR_ID_V2_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8997,
273 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
274 .driver_data = (unsigned long)&mwifiex_pcie8997,
275 },
272 {}, 276 {},
273}; 277};
274 278
@@ -2351,6 +2355,47 @@ static int mwifiex_pcie_host_to_card(struct mwifiex_adapter *adapter, u8 type,
2351 return 0; 2355 return 0;
2352} 2356}
2353 2357
2358/* Function to dump PCIE scratch registers in case of FW crash
2359 */
2360static int
2361mwifiex_pcie_reg_dump(struct mwifiex_adapter *adapter, char *drv_buf)
2362{
2363 char *p = drv_buf;
2364 char buf[256], *ptr;
2365 int i;
2366 u32 value;
2367 struct pcie_service_card *card = adapter->card;
2368 const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
2369 int pcie_scratch_reg[] = {PCIE_SCRATCH_12_REG,
2370 PCIE_SCRATCH_13_REG,
2371 PCIE_SCRATCH_14_REG};
2372
2373 if (!p)
2374 return 0;
2375
2376 mwifiex_dbg(adapter, MSG, "PCIE register dump start\n");
2377
2378 if (mwifiex_read_reg(adapter, reg->fw_status, &value)) {
2379 mwifiex_dbg(adapter, ERROR, "failed to read firmware status");
2380 return 0;
2381 }
2382
2383 ptr = buf;
2384 mwifiex_dbg(adapter, MSG, "pcie scratch register:");
2385 for (i = 0; i < ARRAY_SIZE(pcie_scratch_reg); i++) {
2386 mwifiex_read_reg(adapter, pcie_scratch_reg[i], &value);
2387 ptr += sprintf(ptr, "reg:0x%x, value=0x%x\n",
2388 pcie_scratch_reg[i], value);
2389 }
2390
2391 mwifiex_dbg(adapter, MSG, "%s\n", buf);
2392 p += sprintf(p, "%s\n", buf);
2393
2394 mwifiex_dbg(adapter, MSG, "PCIE register dump end\n");
2395
2396 return p - drv_buf;
2397}
2398
2354/* This function read/write firmware */ 2399/* This function read/write firmware */
2355static enum rdwr_status 2400static enum rdwr_status
2356mwifiex_pcie_rdwr_firmware(struct mwifiex_adapter *adapter, u8 doneflag) 2401mwifiex_pcie_rdwr_firmware(struct mwifiex_adapter *adapter, u8 doneflag)
@@ -2759,6 +2804,51 @@ static int mwifiex_pcie_request_irq(struct mwifiex_adapter *adapter)
2759} 2804}
2760 2805
2761/* 2806/*
2807 * This function gets the firmware name for download based on the revision id.
2808 *
2809 * The revision id register is read to determine the revision id.
2810 */
2811static void mwifiex_pcie_get_fw_name(struct mwifiex_adapter *adapter)
2812{
2813 int revision_id = 0;
2814 struct pcie_service_card *card = adapter->card;
2815
2816 switch (card->dev->device) {
2817 case PCIE_DEVICE_ID_MARVELL_88W8766P:
2818 strcpy(adapter->fw_name, PCIE8766_DEFAULT_FW_NAME);
2819 break;
2820 case PCIE_DEVICE_ID_MARVELL_88W8897:
2821 mwifiex_write_reg(adapter, 0x0c58, 0x80c00000);
2822 mwifiex_read_reg(adapter, 0x0c58, &revision_id);
2823 revision_id &= 0xff00;
2824 switch (revision_id) {
2825 case PCIE8897_A0:
2826 strcpy(adapter->fw_name, PCIE8897_A0_FW_NAME);
2827 break;
2828 case PCIE8897_B0:
2829 strcpy(adapter->fw_name, PCIE8897_B0_FW_NAME);
2830 break;
2831 default:
2832 break;
2833 }
2834 case PCIE_DEVICE_ID_MARVELL_88W8997:
2835 mwifiex_read_reg(adapter, 0x0c48, &revision_id);
2836 switch (revision_id) {
2837 case PCIE8997_V2:
2838 strcpy(adapter->fw_name, PCIE8997_FW_NAME_V2);
2839 break;
2840 case PCIE8997_Z:
2841 strcpy(adapter->fw_name, PCIE8997_FW_NAME_Z);
2842 break;
2843 default:
2844 break;
2845 }
2846 default:
2847 break;
2848 }
2849}
2850
2851/*
2762 * This function registers the PCIE device. 2852 * This function registers the PCIE device.
2763 * 2853 *
2764 * PCIE IRQ is claimed, block size is set and driver data is initialized. 2854 * PCIE IRQ is claimed, block size is set and driver data is initialized.
@@ -2778,8 +2868,8 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
2778 adapter->tx_buf_size = card->pcie.tx_buf_size; 2868 adapter->tx_buf_size = card->pcie.tx_buf_size;
2779 adapter->mem_type_mapping_tbl = card->pcie.mem_type_mapping_tbl; 2869 adapter->mem_type_mapping_tbl = card->pcie.mem_type_mapping_tbl;
2780 adapter->num_mem_types = card->pcie.num_mem_types; 2870 adapter->num_mem_types = card->pcie.num_mem_types;
2781 strcpy(adapter->fw_name, card->pcie.firmware);
2782 adapter->ext_scan = card->pcie.can_ext_scan; 2871 adapter->ext_scan = card->pcie.can_ext_scan;
2872 mwifiex_pcie_get_fw_name(adapter);
2783 2873
2784 return 0; 2874 return 0;
2785} 2875}
@@ -2850,6 +2940,7 @@ static struct mwifiex_if_ops pcie_ops = {
2850 .cleanup_mpa_buf = NULL, 2940 .cleanup_mpa_buf = NULL,
2851 .init_fw_port = mwifiex_pcie_init_fw_port, 2941 .init_fw_port = mwifiex_pcie_init_fw_port,
2852 .clean_pcie_ring = mwifiex_clean_pcie_ring_buf, 2942 .clean_pcie_ring = mwifiex_clean_pcie_ring_buf,
2943 .reg_dump = mwifiex_pcie_reg_dump,
2853 .device_dump = mwifiex_pcie_device_dump, 2944 .device_dump = mwifiex_pcie_device_dump,
2854}; 2945};
2855 2946
@@ -2907,6 +2998,3 @@ MODULE_AUTHOR("Marvell International Ltd.");
2907MODULE_DESCRIPTION("Marvell WiFi-Ex PCI-Express Driver version " PCIE_VERSION); 2998MODULE_DESCRIPTION("Marvell WiFi-Ex PCI-Express Driver version " PCIE_VERSION);
2908MODULE_VERSION(PCIE_VERSION); 2999MODULE_VERSION(PCIE_VERSION);
2909MODULE_LICENSE("GPL v2"); 3000MODULE_LICENSE("GPL v2");
2910MODULE_FIRMWARE(PCIE8766_DEFAULT_FW_NAME);
2911MODULE_FIRMWARE(PCIE8897_DEFAULT_FW_NAME);
2912MODULE_FIRMWARE(PCIE8997_DEFAULT_FW_NAME);
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h
index 29e58ce877e3..cc7a5df903be 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.h
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.h
@@ -30,14 +30,22 @@
30#include "main.h" 30#include "main.h"
31 31
32#define PCIE8766_DEFAULT_FW_NAME "mrvl/pcie8766_uapsta.bin" 32#define PCIE8766_DEFAULT_FW_NAME "mrvl/pcie8766_uapsta.bin"
33#define PCIE8897_DEFAULT_FW_NAME "mrvl/pcie8897_uapsta.bin" 33#define PCIE8897_A0_FW_NAME "mrvl/pcie8897_uapsta_a0.bin"
34#define PCIE8997_DEFAULT_FW_NAME "mrvl/pcie8997_uapsta.bin" 34#define PCIE8897_B0_FW_NAME "mrvl/pcie8897_uapsta.bin"
35#define PCIE8997_FW_NAME_Z "mrvl/pcieusb8997_combo.bin"
36#define PCIE8997_FW_NAME_V2 "mrvl/pcieusb8997_combo_v2.bin"
35 37
36#define PCIE_VENDOR_ID_MARVELL (0x11ab) 38#define PCIE_VENDOR_ID_MARVELL (0x11ab)
39#define PCIE_VENDOR_ID_V2_MARVELL (0x1b4b)
37#define PCIE_DEVICE_ID_MARVELL_88W8766P (0x2b30) 40#define PCIE_DEVICE_ID_MARVELL_88W8766P (0x2b30)
38#define PCIE_DEVICE_ID_MARVELL_88W8897 (0x2b38) 41#define PCIE_DEVICE_ID_MARVELL_88W8897 (0x2b38)
39#define PCIE_DEVICE_ID_MARVELL_88W8997 (0x2b42) 42#define PCIE_DEVICE_ID_MARVELL_88W8997 (0x2b42)
40 43
44#define PCIE8897_A0 0x1100
45#define PCIE8897_B0 0x1200
46#define PCIE8997_Z 0x0
47#define PCIE8997_V2 0x471
48
41/* Constants for Buffer Descriptor (BD) rings */ 49/* Constants for Buffer Descriptor (BD) rings */
42#define MWIFIEX_MAX_TXRX_BD 0x20 50#define MWIFIEX_MAX_TXRX_BD 0x20
43#define MWIFIEX_TXBD_MASK 0x3F 51#define MWIFIEX_TXBD_MASK 0x3F
@@ -65,6 +73,8 @@
65#define PCIE_SCRATCH_10_REG 0xCE8 73#define PCIE_SCRATCH_10_REG 0xCE8
66#define PCIE_SCRATCH_11_REG 0xCEC 74#define PCIE_SCRATCH_11_REG 0xCEC
67#define PCIE_SCRATCH_12_REG 0xCF0 75#define PCIE_SCRATCH_12_REG 0xCF0
76#define PCIE_SCRATCH_13_REG 0xCF8
77#define PCIE_SCRATCH_14_REG 0xCFC
68#define PCIE_RD_DATA_PTR_Q0_Q1 0xC08C 78#define PCIE_RD_DATA_PTR_Q0_Q1 0xC08C
69#define PCIE_WR_DATA_PTR_Q0_Q1 0xC05C 79#define PCIE_WR_DATA_PTR_Q0_Q1 0xC05C
70 80
@@ -263,7 +273,6 @@ static struct memory_type_mapping mem_type_mapping_tbl_w8997[] = {
263}; 273};
264 274
265struct mwifiex_pcie_device { 275struct mwifiex_pcie_device {
266 const char *firmware;
267 const struct mwifiex_pcie_card_reg *reg; 276 const struct mwifiex_pcie_card_reg *reg;
268 u16 blksz_fw_dl; 277 u16 blksz_fw_dl;
269 u16 tx_buf_size; 278 u16 tx_buf_size;
@@ -274,7 +283,6 @@ struct mwifiex_pcie_device {
274}; 283};
275 284
276static const struct mwifiex_pcie_device mwifiex_pcie8766 = { 285static const struct mwifiex_pcie_device mwifiex_pcie8766 = {
277 .firmware = PCIE8766_DEFAULT_FW_NAME,
278 .reg = &mwifiex_reg_8766, 286 .reg = &mwifiex_reg_8766,
279 .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD, 287 .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
280 .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K, 288 .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
@@ -283,7 +291,6 @@ static const struct mwifiex_pcie_device mwifiex_pcie8766 = {
283}; 291};
284 292
285static const struct mwifiex_pcie_device mwifiex_pcie8897 = { 293static const struct mwifiex_pcie_device mwifiex_pcie8897 = {
286 .firmware = PCIE8897_DEFAULT_FW_NAME,
287 .reg = &mwifiex_reg_8897, 294 .reg = &mwifiex_reg_8897,
288 .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD, 295 .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
289 .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K, 296 .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
@@ -294,7 +301,6 @@ static const struct mwifiex_pcie_device mwifiex_pcie8897 = {
294}; 301};
295 302
296static const struct mwifiex_pcie_device mwifiex_pcie8997 = { 303static const struct mwifiex_pcie_device mwifiex_pcie8997 = {
297 .firmware = PCIE8997_DEFAULT_FW_NAME,
298 .reg = &mwifiex_reg_8997, 304 .reg = &mwifiex_reg_8997,
299 .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD, 305 .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
300 .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K, 306 .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index 489f7a911a83..624b0a95c64e 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -494,13 +494,13 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
494 *scan_chan_list, 494 *scan_chan_list,
495 u8 filtered_scan) 495 u8 filtered_scan)
496{ 496{
497 enum ieee80211_band band; 497 enum nl80211_band band;
498 struct ieee80211_supported_band *sband; 498 struct ieee80211_supported_band *sband;
499 struct ieee80211_channel *ch; 499 struct ieee80211_channel *ch;
500 struct mwifiex_adapter *adapter = priv->adapter; 500 struct mwifiex_adapter *adapter = priv->adapter;
501 int chan_idx = 0, i; 501 int chan_idx = 0, i;
502 502
503 for (band = 0; (band < IEEE80211_NUM_BANDS) ; band++) { 503 for (band = 0; (band < NUM_NL80211_BANDS) ; band++) {
504 504
505 if (!priv->wdev.wiphy->bands[band]) 505 if (!priv->wdev.wiphy->bands[band])
506 continue; 506 continue;
@@ -557,13 +557,13 @@ mwifiex_bgscan_create_channel_list(struct mwifiex_private *priv,
557 struct mwifiex_chan_scan_param_set 557 struct mwifiex_chan_scan_param_set
558 *scan_chan_list) 558 *scan_chan_list)
559{ 559{
560 enum ieee80211_band band; 560 enum nl80211_band band;
561 struct ieee80211_supported_band *sband; 561 struct ieee80211_supported_band *sband;
562 struct ieee80211_channel *ch; 562 struct ieee80211_channel *ch;
563 struct mwifiex_adapter *adapter = priv->adapter; 563 struct mwifiex_adapter *adapter = priv->adapter;
564 int chan_idx = 0, i; 564 int chan_idx = 0, i;
565 565
566 for (band = 0; (band < IEEE80211_NUM_BANDS); band++) { 566 for (band = 0; (band < NUM_NL80211_BANDS); band++) {
567 if (!priv->wdev.wiphy->bands[band]) 567 if (!priv->wdev.wiphy->bands[band])
568 continue; 568 continue;
569 569
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index b2c839ae2c3c..a0aec3e00457 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -1123,8 +1123,8 @@ static void mwifiex_deaggr_sdio_pkt(struct mwifiex_adapter *adapter,
1123 __func__, pkt_len, blk_size); 1123 __func__, pkt_len, blk_size);
1124 break; 1124 break;
1125 } 1125 }
1126 skb_deaggr = mwifiex_alloc_dma_align_buf(pkt_len, 1126
1127 GFP_KERNEL | GFP_DMA); 1127 skb_deaggr = mwifiex_alloc_dma_align_buf(pkt_len, GFP_KERNEL);
1128 if (!skb_deaggr) 1128 if (!skb_deaggr)
1129 break; 1129 break;
1130 skb_put(skb_deaggr, pkt_len); 1130 skb_put(skb_deaggr, pkt_len);
@@ -1373,8 +1373,7 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
1373 1373
1374 /* copy pkt to deaggr buf */ 1374 /* copy pkt to deaggr buf */
1375 skb_deaggr = mwifiex_alloc_dma_align_buf(len_arr[pind], 1375 skb_deaggr = mwifiex_alloc_dma_align_buf(len_arr[pind],
1376 GFP_KERNEL | 1376 GFP_KERNEL);
1377 GFP_DMA);
1378 if (!skb_deaggr) { 1377 if (!skb_deaggr) {
1379 mwifiex_dbg(adapter, ERROR, "skb allocation failure\t" 1378 mwifiex_dbg(adapter, ERROR, "skb allocation failure\t"
1380 "drop pkt len=%d type=%d\n", 1379 "drop pkt len=%d type=%d\n",
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
index 30f152601c57..8cb895b7f2ee 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
@@ -1558,6 +1558,30 @@ static int mwifiex_cmd_robust_coex(struct mwifiex_private *priv,
1558 return 0; 1558 return 0;
1559} 1559}
1560 1560
1561static int mwifiex_cmd_gtk_rekey_offload(struct mwifiex_private *priv,
1562 struct host_cmd_ds_command *cmd,
1563 u16 cmd_action,
1564 struct cfg80211_gtk_rekey_data *data)
1565{
1566 struct host_cmd_ds_gtk_rekey_params *rekey = &cmd->params.rekey;
1567 u64 rekey_ctr;
1568
1569 cmd->command = cpu_to_le16(HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG);
1570 cmd->size = cpu_to_le16(sizeof(*rekey) + S_DS_GEN);
1571
1572 rekey->action = cpu_to_le16(cmd_action);
1573 if (cmd_action == HostCmd_ACT_GEN_SET) {
1574 memcpy(rekey->kek, data->kek, NL80211_KEK_LEN);
1575 memcpy(rekey->kck, data->kck, NL80211_KCK_LEN);
1576 rekey_ctr = be64_to_cpup((__be64 *)data->replay_ctr);
1577 rekey->replay_ctr_low = cpu_to_le32((u32)rekey_ctr);
1578 rekey->replay_ctr_high =
1579 cpu_to_le32((u32)((u64)rekey_ctr >> 32));
1580 }
1581
1582 return 0;
1583}
1584
1561static int 1585static int
1562mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv, 1586mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv,
1563 struct host_cmd_ds_command *cmd, 1587 struct host_cmd_ds_command *cmd,
@@ -2094,6 +2118,10 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
2094 ret = mwifiex_cmd_robust_coex(priv, cmd_ptr, cmd_action, 2118 ret = mwifiex_cmd_robust_coex(priv, cmd_ptr, cmd_action,
2095 data_buf); 2119 data_buf);
2096 break; 2120 break;
2121 case HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG:
2122 ret = mwifiex_cmd_gtk_rekey_offload(priv, cmd_ptr, cmd_action,
2123 data_buf);
2124 break;
2097 default: 2125 default:
2098 mwifiex_dbg(priv->adapter, ERROR, 2126 mwifiex_dbg(priv->adapter, ERROR,
2099 "PREP_CMD: unknown cmd- %#x\n", cmd_no); 2127 "PREP_CMD: unknown cmd- %#x\n", cmd_no);
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
index d96523e10eb4..434b9776db45 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
@@ -1244,6 +1244,8 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
1244 case HostCmd_CMD_ROBUST_COEX: 1244 case HostCmd_CMD_ROBUST_COEX:
1245 ret = mwifiex_ret_robust_coex(priv, resp, data_buf); 1245 ret = mwifiex_ret_robust_coex(priv, resp, data_buf);
1246 break; 1246 break;
1247 case HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG:
1248 break;
1247 default: 1249 default:
1248 mwifiex_dbg(adapter, ERROR, 1250 mwifiex_dbg(adapter, ERROR,
1249 "CMD_RESP: unknown cmd response %#x\n", 1251 "CMD_RESP: unknown cmd response %#x\n",
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
index 070bce401151..0104108b4ea2 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
@@ -147,6 +147,9 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
147 mwifiex_stop_net_dev_queue(priv->netdev, adapter); 147 mwifiex_stop_net_dev_queue(priv->netdev, adapter);
148 if (netif_carrier_ok(priv->netdev)) 148 if (netif_carrier_ok(priv->netdev))
149 netif_carrier_off(priv->netdev); 149 netif_carrier_off(priv->netdev);
150
151 mwifiex_send_cmd(priv, HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG,
152 HostCmd_ACT_GEN_REMOVE, 0, NULL, false);
150} 153}
151 154
152static int mwifiex_parse_tdls_event(struct mwifiex_private *priv, 155static int mwifiex_parse_tdls_event(struct mwifiex_private *priv,
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
index d5c56eb9e985..d8de432d46a2 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
@@ -509,7 +509,8 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
509 509
510 if (priv && priv->sched_scanning) { 510 if (priv && priv->sched_scanning) {
511#ifdef CONFIG_PM 511#ifdef CONFIG_PM
512 if (!priv->wdev.wiphy->wowlan_config->nd_config) { 512 if (priv->wdev.wiphy->wowlan_config &&
513 !priv->wdev.wiphy->wowlan_config->nd_config) {
513#endif 514#endif
514 mwifiex_dbg(adapter, CMD, "aborting bgscan!\n"); 515 mwifiex_dbg(adapter, CMD, "aborting bgscan!\n");
515 mwifiex_stop_bg_scan(priv); 516 mwifiex_stop_bg_scan(priv);
diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c
index 150649602e98..df9704de0715 100644
--- a/drivers/net/wireless/marvell/mwifiex/tdls.c
+++ b/drivers/net/wireless/marvell/mwifiex/tdls.c
@@ -285,7 +285,7 @@ static int mwifiex_tdls_add_vht_oper(struct mwifiex_private *priv,
285 else 285 else
286 usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_bg; 286 usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_bg;
287 287
288 /* find the minmum bandwith between AP/TDLS peers */ 288 /* find the minimum bandwidth between AP/TDLS peers */
289 vht_cap = &sta_ptr->tdls_cap.vhtcap; 289 vht_cap = &sta_ptr->tdls_cap.vhtcap;
290 supp_chwd_set = GET_VHTCAP_CHWDSET(usr_vht_cap_info); 290 supp_chwd_set = GET_VHTCAP_CHWDSET(usr_vht_cap_info);
291 peer_supp_chwd_set = 291 peer_supp_chwd_set =
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
index 16d95b22fe5c..f79d00d1e294 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
@@ -694,7 +694,7 @@ static int mwifiex_uap_custom_ie_prepare(u8 *tlv, void *cmd_buf, u16 *ie_size)
694 struct mwifiex_ie_list *ap_ie = cmd_buf; 694 struct mwifiex_ie_list *ap_ie = cmd_buf;
695 struct mwifiex_ie_types_header *tlv_ie = (void *)tlv; 695 struct mwifiex_ie_types_header *tlv_ie = (void *)tlv;
696 696
697 if (!ap_ie || !ap_ie->len || !ap_ie->ie_list) 697 if (!ap_ie || !ap_ie->len)
698 return -1; 698 return -1;
699 699
700 *ie_size += le16_to_cpu(ap_ie->len) + 700 *ie_size += le16_to_cpu(ap_ie->len) +
@@ -816,7 +816,7 @@ void mwifiex_uap_set_channel(struct mwifiex_private *priv,
816 chandef.chan->center_freq); 816 chandef.chan->center_freq);
817 817
818 /* Set appropriate bands */ 818 /* Set appropriate bands */
819 if (chandef.chan->band == IEEE80211_BAND_2GHZ) { 819 if (chandef.chan->band == NL80211_BAND_2GHZ) {
820 bss_cfg->band_cfg = BAND_CONFIG_BG; 820 bss_cfg->band_cfg = BAND_CONFIG_BG;
821 config_bands = BAND_B | BAND_G; 821 config_bands = BAND_B | BAND_G;
822 822
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
index 52f7981a8afc..c95b61dc87c2 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
@@ -212,6 +212,8 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
212 atomic_inc(&adapter->tx_pending); 212 atomic_inc(&adapter->tx_pending);
213 atomic_inc(&adapter->pending_bridged_pkts); 213 atomic_inc(&adapter->pending_bridged_pkts);
214 214
215 mwifiex_queue_main_work(priv->adapter);
216
215 return; 217 return;
216} 218}
217 219
@@ -263,6 +265,96 @@ int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
263 return mwifiex_process_rx_packet(priv, skb); 265 return mwifiex_process_rx_packet(priv, skb);
264} 266}
265 267
268int mwifiex_uap_recv_packet(struct mwifiex_private *priv,
269 struct sk_buff *skb)
270{
271	struct mwifiex_adapter *adapter = priv->adapter;
272 struct mwifiex_sta_node *src_node;
273 struct ethhdr *p_ethhdr;
274 struct sk_buff *skb_uap;
275 struct mwifiex_txinfo *tx_info;
276
277 if (!skb)
278 return -1;
279
280 p_ethhdr = (void *)skb->data;
281 src_node = mwifiex_get_sta_entry(priv, p_ethhdr->h_source);
282 if (src_node) {
283 src_node->stats.last_rx = jiffies;
284 src_node->stats.rx_bytes += skb->len;
285 src_node->stats.rx_packets++;
286 }
287
288 skb->dev = priv->netdev;
289 skb->protocol = eth_type_trans(skb, priv->netdev);
290 skb->ip_summed = CHECKSUM_NONE;
291
292	 /* This is required only in the case of 11n and USB/PCIE, as we
293	  * allocate a 4K buffer only for 11N (to be able to receive 4K
294	  * AMSDU packets). For SDIO we allocate buffers based on the
295	  * size of the packet, so this is not needed there.
296	  *
297	  * The truesize is adjusted here because our allocation for each
298	  * skb is 4K while we only receive 2K packets, which causes the
299	  * kernel to start dropping packets when an application has
300	  * sized its receive buffer for 2K packets. For example, if a
301	  * 64K packet is received (in IP fragments) and the application
302	  * allocates 64K to receive it, the packet would almost double
303	  * in size, because we place each 1.5K fragment in a 4K skb
304	  * before passing it up. As soon as the 64K limit is hit, the
305	  * kernel starts dropping the rest of the fragments. Currently
306	  * we fail the Filesndl-ht.scr script for UDP, hence this
307	  * fix.
308	  */
309 if ((adapter->iface_type == MWIFIEX_USB ||
310 adapter->iface_type == MWIFIEX_PCIE) &&
311 (skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE))
312 skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
313
314 if (is_multicast_ether_addr(p_ethhdr->h_dest) ||
315 mwifiex_get_sta_entry(priv, p_ethhdr->h_dest)) {
316 if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN)
317 skb_uap =
318 skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
319 else
320 skb_uap = skb_copy(skb, GFP_ATOMIC);
321
322 if (likely(skb_uap)) {
323 tx_info = MWIFIEX_SKB_TXCB(skb_uap);
324 memset(tx_info, 0, sizeof(*tx_info));
325 tx_info->bss_num = priv->bss_num;
326 tx_info->bss_type = priv->bss_type;
327 tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT;
328 __net_timestamp(skb_uap);
329 mwifiex_wmm_add_buf_txqueue(priv, skb_uap);
330 atomic_inc(&adapter->tx_pending);
331 atomic_inc(&adapter->pending_bridged_pkts);
332 if ((atomic_read(&adapter->pending_bridged_pkts) >=
333 MWIFIEX_BRIDGED_PKTS_THR_HIGH)) {
334 mwifiex_dbg(adapter, ERROR,
335 "Tx: Bridge packet limit reached. Drop packet!\n");
336 mwifiex_uap_cleanup_tx_queues(priv);
337 }
338
339 } else {
340 mwifiex_dbg(adapter, ERROR, "failed to allocate skb_uap");
341 }
342
343 mwifiex_queue_main_work(adapter);
344 /* Don't forward Intra-BSS unicast packet to upper layer*/
345 if (mwifiex_get_sta_entry(priv, p_ethhdr->h_dest))
346 return 0;
347 }
348
349 /* Forward multicast/broadcast packet to upper layer*/
350 if (in_interrupt())
351 netif_rx(skb);
352 else
353 netif_rx_ni(skb);
354
355 return 0;
356}
357
266/* 358/*
267 * This function processes the packet received on AP interface. 359 * This function processes the packet received on AP interface.
268 * 360 *
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index 088429d0a634..b1b400b59d86 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -346,20 +346,20 @@ struct mwl8k_sta {
346#define MWL8K_STA(_sta) ((struct mwl8k_sta *)&((_sta)->drv_priv)) 346#define MWL8K_STA(_sta) ((struct mwl8k_sta *)&((_sta)->drv_priv))
347 347
348static const struct ieee80211_channel mwl8k_channels_24[] = { 348static const struct ieee80211_channel mwl8k_channels_24[] = {
349 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2412, .hw_value = 1, }, 349 { .band = NL80211_BAND_2GHZ, .center_freq = 2412, .hw_value = 1, },
350 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2417, .hw_value = 2, }, 350 { .band = NL80211_BAND_2GHZ, .center_freq = 2417, .hw_value = 2, },
351 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2422, .hw_value = 3, }, 351 { .band = NL80211_BAND_2GHZ, .center_freq = 2422, .hw_value = 3, },
352 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2427, .hw_value = 4, }, 352 { .band = NL80211_BAND_2GHZ, .center_freq = 2427, .hw_value = 4, },
353 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2432, .hw_value = 5, }, 353 { .band = NL80211_BAND_2GHZ, .center_freq = 2432, .hw_value = 5, },
354 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2437, .hw_value = 6, }, 354 { .band = NL80211_BAND_2GHZ, .center_freq = 2437, .hw_value = 6, },
355 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2442, .hw_value = 7, }, 355 { .band = NL80211_BAND_2GHZ, .center_freq = 2442, .hw_value = 7, },
356 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2447, .hw_value = 8, }, 356 { .band = NL80211_BAND_2GHZ, .center_freq = 2447, .hw_value = 8, },
357 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2452, .hw_value = 9, }, 357 { .band = NL80211_BAND_2GHZ, .center_freq = 2452, .hw_value = 9, },
358 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2457, .hw_value = 10, }, 358 { .band = NL80211_BAND_2GHZ, .center_freq = 2457, .hw_value = 10, },
359 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2462, .hw_value = 11, }, 359 { .band = NL80211_BAND_2GHZ, .center_freq = 2462, .hw_value = 11, },
360 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2467, .hw_value = 12, }, 360 { .band = NL80211_BAND_2GHZ, .center_freq = 2467, .hw_value = 12, },
361 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2472, .hw_value = 13, }, 361 { .band = NL80211_BAND_2GHZ, .center_freq = 2472, .hw_value = 13, },
362 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2484, .hw_value = 14, }, 362 { .band = NL80211_BAND_2GHZ, .center_freq = 2484, .hw_value = 14, },
363}; 363};
364 364
365static const struct ieee80211_rate mwl8k_rates_24[] = { 365static const struct ieee80211_rate mwl8k_rates_24[] = {
@@ -379,10 +379,10 @@ static const struct ieee80211_rate mwl8k_rates_24[] = {
379}; 379};
380 380
381static const struct ieee80211_channel mwl8k_channels_50[] = { 381static const struct ieee80211_channel mwl8k_channels_50[] = {
382 { .band = IEEE80211_BAND_5GHZ, .center_freq = 5180, .hw_value = 36, }, 382 { .band = NL80211_BAND_5GHZ, .center_freq = 5180, .hw_value = 36, },
383 { .band = IEEE80211_BAND_5GHZ, .center_freq = 5200, .hw_value = 40, }, 383 { .band = NL80211_BAND_5GHZ, .center_freq = 5200, .hw_value = 40, },
384 { .band = IEEE80211_BAND_5GHZ, .center_freq = 5220, .hw_value = 44, }, 384 { .band = NL80211_BAND_5GHZ, .center_freq = 5220, .hw_value = 44, },
385 { .band = IEEE80211_BAND_5GHZ, .center_freq = 5240, .hw_value = 48, }, 385 { .band = NL80211_BAND_5GHZ, .center_freq = 5240, .hw_value = 48, },
386}; 386};
387 387
388static const struct ieee80211_rate mwl8k_rates_50[] = { 388static const struct ieee80211_rate mwl8k_rates_50[] = {
@@ -1010,11 +1010,11 @@ mwl8k_rxd_ap_process(void *_rxd, struct ieee80211_rx_status *status,
1010 } 1010 }
1011 1011
1012 if (rxd->channel > 14) { 1012 if (rxd->channel > 14) {
1013 status->band = IEEE80211_BAND_5GHZ; 1013 status->band = NL80211_BAND_5GHZ;
1014 if (!(status->flag & RX_FLAG_HT)) 1014 if (!(status->flag & RX_FLAG_HT))
1015 status->rate_idx -= 5; 1015 status->rate_idx -= 5;
1016 } else { 1016 } else {
1017 status->band = IEEE80211_BAND_2GHZ; 1017 status->band = NL80211_BAND_2GHZ;
1018 } 1018 }
1019 status->freq = ieee80211_channel_to_frequency(rxd->channel, 1019 status->freq = ieee80211_channel_to_frequency(rxd->channel,
1020 status->band); 1020 status->band);
@@ -1118,11 +1118,11 @@ mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status,
1118 status->flag |= RX_FLAG_HT; 1118 status->flag |= RX_FLAG_HT;
1119 1119
1120 if (rxd->channel > 14) { 1120 if (rxd->channel > 14) {
1121 status->band = IEEE80211_BAND_5GHZ; 1121 status->band = NL80211_BAND_5GHZ;
1122 if (!(status->flag & RX_FLAG_HT)) 1122 if (!(status->flag & RX_FLAG_HT))
1123 status->rate_idx -= 5; 1123 status->rate_idx -= 5;
1124 } else { 1124 } else {
1125 status->band = IEEE80211_BAND_2GHZ; 1125 status->band = NL80211_BAND_2GHZ;
1126 } 1126 }
1127 status->freq = ieee80211_channel_to_frequency(rxd->channel, 1127 status->freq = ieee80211_channel_to_frequency(rxd->channel,
1128 status->band); 1128 status->band);
@@ -2300,13 +2300,13 @@ static void mwl8k_setup_2ghz_band(struct ieee80211_hw *hw)
2300 BUILD_BUG_ON(sizeof(priv->rates_24) != sizeof(mwl8k_rates_24)); 2300 BUILD_BUG_ON(sizeof(priv->rates_24) != sizeof(mwl8k_rates_24));
2301 memcpy(priv->rates_24, mwl8k_rates_24, sizeof(mwl8k_rates_24)); 2301 memcpy(priv->rates_24, mwl8k_rates_24, sizeof(mwl8k_rates_24));
2302 2302
2303 priv->band_24.band = IEEE80211_BAND_2GHZ; 2303 priv->band_24.band = NL80211_BAND_2GHZ;
2304 priv->band_24.channels = priv->channels_24; 2304 priv->band_24.channels = priv->channels_24;
2305 priv->band_24.n_channels = ARRAY_SIZE(mwl8k_channels_24); 2305 priv->band_24.n_channels = ARRAY_SIZE(mwl8k_channels_24);
2306 priv->band_24.bitrates = priv->rates_24; 2306 priv->band_24.bitrates = priv->rates_24;
2307 priv->band_24.n_bitrates = ARRAY_SIZE(mwl8k_rates_24); 2307 priv->band_24.n_bitrates = ARRAY_SIZE(mwl8k_rates_24);
2308 2308
2309 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band_24; 2309 hw->wiphy->bands[NL80211_BAND_2GHZ] = &priv->band_24;
2310} 2310}
2311 2311
2312static void mwl8k_setup_5ghz_band(struct ieee80211_hw *hw) 2312static void mwl8k_setup_5ghz_band(struct ieee80211_hw *hw)
@@ -2319,13 +2319,13 @@ static void mwl8k_setup_5ghz_band(struct ieee80211_hw *hw)
2319 BUILD_BUG_ON(sizeof(priv->rates_50) != sizeof(mwl8k_rates_50)); 2319 BUILD_BUG_ON(sizeof(priv->rates_50) != sizeof(mwl8k_rates_50));
2320 memcpy(priv->rates_50, mwl8k_rates_50, sizeof(mwl8k_rates_50)); 2320 memcpy(priv->rates_50, mwl8k_rates_50, sizeof(mwl8k_rates_50));
2321 2321
2322 priv->band_50.band = IEEE80211_BAND_5GHZ; 2322 priv->band_50.band = NL80211_BAND_5GHZ;
2323 priv->band_50.channels = priv->channels_50; 2323 priv->band_50.channels = priv->channels_50;
2324 priv->band_50.n_channels = ARRAY_SIZE(mwl8k_channels_50); 2324 priv->band_50.n_channels = ARRAY_SIZE(mwl8k_channels_50);
2325 priv->band_50.bitrates = priv->rates_50; 2325 priv->band_50.bitrates = priv->rates_50;
2326 priv->band_50.n_bitrates = ARRAY_SIZE(mwl8k_rates_50); 2326 priv->band_50.n_bitrates = ARRAY_SIZE(mwl8k_rates_50);
2327 2327
2328 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &priv->band_50; 2328 hw->wiphy->bands[NL80211_BAND_5GHZ] = &priv->band_50;
2329} 2329}
2330 2330
2331/* 2331/*
@@ -2876,9 +2876,9 @@ static int mwl8k_cmd_tx_power(struct ieee80211_hw *hw,
2876 cmd->header.length = cpu_to_le16(sizeof(*cmd)); 2876 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2877 cmd->action = cpu_to_le16(MWL8K_CMD_SET_LIST); 2877 cmd->action = cpu_to_le16(MWL8K_CMD_SET_LIST);
2878 2878
2879 if (channel->band == IEEE80211_BAND_2GHZ) 2879 if (channel->band == NL80211_BAND_2GHZ)
2880 cmd->band = cpu_to_le16(0x1); 2880 cmd->band = cpu_to_le16(0x1);
2881 else if (channel->band == IEEE80211_BAND_5GHZ) 2881 else if (channel->band == NL80211_BAND_5GHZ)
2882 cmd->band = cpu_to_le16(0x4); 2882 cmd->band = cpu_to_le16(0x4);
2883 2883
2884 cmd->channel = cpu_to_le16(channel->hw_value); 2884 cmd->channel = cpu_to_le16(channel->hw_value);
@@ -3067,7 +3067,7 @@ static int freq_to_idx(struct mwl8k_priv *priv, int freq)
3067 struct ieee80211_supported_band *sband; 3067 struct ieee80211_supported_band *sband;
3068 int band, ch, idx = 0; 3068 int band, ch, idx = 0;
3069 3069
3070 for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) { 3070 for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
3071 sband = priv->hw->wiphy->bands[band]; 3071 sband = priv->hw->wiphy->bands[band];
3072 if (!sband) 3072 if (!sband)
3073 continue; 3073 continue;
@@ -3149,9 +3149,9 @@ static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
3149 cmd->action = cpu_to_le16(MWL8K_CMD_SET); 3149 cmd->action = cpu_to_le16(MWL8K_CMD_SET);
3150 cmd->current_channel = channel->hw_value; 3150 cmd->current_channel = channel->hw_value;
3151 3151
3152 if (channel->band == IEEE80211_BAND_2GHZ) 3152 if (channel->band == NL80211_BAND_2GHZ)
3153 cmd->channel_flags |= cpu_to_le32(0x00000001); 3153 cmd->channel_flags |= cpu_to_le32(0x00000001);
3154 else if (channel->band == IEEE80211_BAND_5GHZ) 3154 else if (channel->band == NL80211_BAND_5GHZ)
3155 cmd->channel_flags |= cpu_to_le32(0x00000004); 3155 cmd->channel_flags |= cpu_to_le32(0x00000004);
3156 3156
3157 if (!priv->sw_scan_start) { 3157 if (!priv->sw_scan_start) {
@@ -4094,10 +4094,10 @@ static int mwl8k_cmd_set_new_stn_add(struct ieee80211_hw *hw,
4094 memcpy(cmd->mac_addr, sta->addr, ETH_ALEN); 4094 memcpy(cmd->mac_addr, sta->addr, ETH_ALEN);
4095 cmd->stn_id = cpu_to_le16(sta->aid); 4095 cmd->stn_id = cpu_to_le16(sta->aid);
4096 cmd->action = cpu_to_le16(MWL8K_STA_ACTION_ADD); 4096 cmd->action = cpu_to_le16(MWL8K_STA_ACTION_ADD);
4097 if (hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) 4097 if (hw->conf.chandef.chan->band == NL80211_BAND_2GHZ)
4098 rates = sta->supp_rates[IEEE80211_BAND_2GHZ]; 4098 rates = sta->supp_rates[NL80211_BAND_2GHZ];
4099 else 4099 else
4100 rates = sta->supp_rates[IEEE80211_BAND_5GHZ] << 5; 4100 rates = sta->supp_rates[NL80211_BAND_5GHZ] << 5;
4101 cmd->legacy_rates = cpu_to_le32(rates); 4101 cmd->legacy_rates = cpu_to_le32(rates);
4102 if (sta->ht_cap.ht_supported) { 4102 if (sta->ht_cap.ht_supported) {
4103 cmd->ht_rates[0] = sta->ht_cap.mcs.rx_mask[0]; 4103 cmd->ht_rates[0] = sta->ht_cap.mcs.rx_mask[0];
@@ -4529,10 +4529,10 @@ static int mwl8k_cmd_update_stadb_add(struct ieee80211_hw *hw,
4529 p->ht_caps = cpu_to_le16(sta->ht_cap.cap); 4529 p->ht_caps = cpu_to_le16(sta->ht_cap.cap);
4530 p->extended_ht_caps = (sta->ht_cap.ampdu_factor & 3) | 4530 p->extended_ht_caps = (sta->ht_cap.ampdu_factor & 3) |
4531 ((sta->ht_cap.ampdu_density & 7) << 2); 4531 ((sta->ht_cap.ampdu_density & 7) << 2);
4532 if (hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) 4532 if (hw->conf.chandef.chan->band == NL80211_BAND_2GHZ)
4533 rates = sta->supp_rates[IEEE80211_BAND_2GHZ]; 4533 rates = sta->supp_rates[NL80211_BAND_2GHZ];
4534 else 4534 else
4535 rates = sta->supp_rates[IEEE80211_BAND_5GHZ] << 5; 4535 rates = sta->supp_rates[NL80211_BAND_5GHZ] << 5;
4536 legacy_rate_mask_to_array(p->legacy_rates, rates); 4536 legacy_rate_mask_to_array(p->legacy_rates, rates);
4537 memcpy(p->ht_rates, sta->ht_cap.mcs.rx_mask, 16); 4537 memcpy(p->ht_rates, sta->ht_cap.mcs.rx_mask, 16);
4538 p->interop = 1; 4538 p->interop = 1;
@@ -5010,11 +5010,11 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5010 goto out; 5010 goto out;
5011 } 5011 }
5012 5012
5013 if (hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) { 5013 if (hw->conf.chandef.chan->band == NL80211_BAND_2GHZ) {
5014 ap_legacy_rates = ap->supp_rates[IEEE80211_BAND_2GHZ]; 5014 ap_legacy_rates = ap->supp_rates[NL80211_BAND_2GHZ];
5015 } else { 5015 } else {
5016 ap_legacy_rates = 5016 ap_legacy_rates =
5017 ap->supp_rates[IEEE80211_BAND_5GHZ] << 5; 5017 ap->supp_rates[NL80211_BAND_5GHZ] << 5;
5018 } 5018 }
5019 memcpy(ap_mcs_rates, ap->ht_cap.mcs.rx_mask, 16); 5019 memcpy(ap_mcs_rates, ap->ht_cap.mcs.rx_mask, 16);
5020 5020
@@ -5042,7 +5042,7 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5042 idx--; 5042 idx--;
5043 5043
5044 if (hw->conf.chandef.chan->band == 5044 if (hw->conf.chandef.chan->band ==
5045 IEEE80211_BAND_2GHZ) 5045 NL80211_BAND_2GHZ)
5046 rate = mwl8k_rates_24[idx].hw_value; 5046 rate = mwl8k_rates_24[idx].hw_value;
5047 else 5047 else
5048 rate = mwl8k_rates_50[idx].hw_value; 5048 rate = mwl8k_rates_50[idx].hw_value;
@@ -5116,7 +5116,7 @@ mwl8k_bss_info_changed_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5116 if (idx) 5116 if (idx)
5117 idx--; 5117 idx--;
5118 5118
5119 if (hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) 5119 if (hw->conf.chandef.chan->band == NL80211_BAND_2GHZ)
5120 rate = mwl8k_rates_24[idx].hw_value; 5120 rate = mwl8k_rates_24[idx].hw_value;
5121 else 5121 else
5122 rate = mwl8k_rates_50[idx].hw_value; 5122 rate = mwl8k_rates_50[idx].hw_value;
@@ -5388,7 +5388,7 @@ static int mwl8k_get_survey(struct ieee80211_hw *hw, int idx,
5388 struct ieee80211_supported_band *sband; 5388 struct ieee80211_supported_band *sband;
5389 5389
5390 if (priv->ap_fw) { 5390 if (priv->ap_fw) {
5391 sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ]; 5391 sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
5392 5392
5393 if (sband && idx >= sband->n_channels) { 5393 if (sband && idx >= sband->n_channels) {
5394 idx -= sband->n_channels; 5394 idx -= sband->n_channels;
@@ -5396,7 +5396,7 @@ static int mwl8k_get_survey(struct ieee80211_hw *hw, int idx,
5396 } 5396 }
5397 5397
5398 if (!sband) 5398 if (!sband)
5399 sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ]; 5399 sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
5400 5400
5401 if (!sband || idx >= sband->n_channels) 5401 if (!sband || idx >= sband->n_channels)
5402 return -ENOENT; 5402 return -ENOENT;
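The mwl8k hunks above apply the same mechanical rename seen throughout this diff: every IEEE80211_BAND_* constant becomes the corresponding NL80211_BAND_* value, IEEE80211_NUM_BANDS becomes NUM_NL80211_BANDS, and variables typed enum ieee80211_band become enum nl80211_band, while the channel and rate tables themselves are unchanged. A minimal sketch of the band-walk idiom as it reads after the rename (illustrative only, not code from this commit; count_channels() is a made-up helper, the wiphy/band types and NL80211_BAND_* constants come from cfg80211/nl80211):

/* Count usable channels across all bands a wiphy advertises. */
static int count_channels(struct wiphy *wiphy)
{
	struct ieee80211_supported_band *sband;
	enum nl80211_band band;
	int n = 0;

	for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
		sband = wiphy->bands[band];
		if (!sband)
			continue;	/* band not supported by this device */
		n += sband->n_channels;
	}
	return n;
}

freq_to_idx() above uses the same loop shape, only with the old enum names swapped for the new ones.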
diff --git a/drivers/net/wireless/mediatek/mt7601u/init.c b/drivers/net/wireless/mediatek/mt7601u/init.c
index 26190fd33407..8fa78d7156be 100644
--- a/drivers/net/wireless/mediatek/mt7601u/init.c
+++ b/drivers/net/wireless/mediatek/mt7601u/init.c
@@ -469,7 +469,7 @@ struct mt7601u_dev *mt7601u_alloc_device(struct device *pdev)
469} 469}
470 470
471#define CHAN2G(_idx, _freq) { \ 471#define CHAN2G(_idx, _freq) { \
472 .band = IEEE80211_BAND_2GHZ, \ 472 .band = NL80211_BAND_2GHZ, \
473 .center_freq = (_freq), \ 473 .center_freq = (_freq), \
474 .hw_value = (_idx), \ 474 .hw_value = (_idx), \
475 .max_power = 30, \ 475 .max_power = 30, \
@@ -563,7 +563,7 @@ mt76_init_sband_2g(struct mt7601u_dev *dev)
563{ 563{
564 dev->sband_2g = devm_kzalloc(dev->dev, sizeof(*dev->sband_2g), 564 dev->sband_2g = devm_kzalloc(dev->dev, sizeof(*dev->sband_2g),
565 GFP_KERNEL); 565 GFP_KERNEL);
566 dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = dev->sband_2g; 566 dev->hw->wiphy->bands[NL80211_BAND_2GHZ] = dev->sband_2g;
567 567
568 WARN_ON(dev->ee->reg.start - 1 + dev->ee->reg.num > 568 WARN_ON(dev->ee->reg.start - 1 + dev->ee->reg.num >
569 ARRAY_SIZE(mt76_channels_2ghz)); 569 ARRAY_SIZE(mt76_channels_2ghz));
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index 7fa0128de7e3..c36fa4e03fb6 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -777,7 +777,7 @@ static int rt2800_agc_to_rssi(struct rt2x00_dev *rt2x00dev, u32 rxwi_w2)
777 u8 offset1; 777 u8 offset1;
778 u8 offset2; 778 u8 offset2;
779 779
780 if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) { 780 if (rt2x00dev->curr_band == NL80211_BAND_2GHZ) {
781 rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &eeprom); 781 rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &eeprom);
782 offset0 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET0); 782 offset0 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET0);
783 offset1 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET1); 783 offset1 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET1);
@@ -1174,7 +1174,7 @@ static void rt2800_brightness_set(struct led_classdev *led_cdev,
1174 container_of(led_cdev, struct rt2x00_led, led_dev); 1174 container_of(led_cdev, struct rt2x00_led, led_dev);
1175 unsigned int enabled = brightness != LED_OFF; 1175 unsigned int enabled = brightness != LED_OFF;
1176 unsigned int bg_mode = 1176 unsigned int bg_mode =
1177 (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_2GHZ); 1177 (enabled && led->rt2x00dev->curr_band == NL80211_BAND_2GHZ);
1178 unsigned int polarity = 1178 unsigned int polarity =
1179 rt2x00_get_field16(led->rt2x00dev->led_mcu_reg, 1179 rt2x00_get_field16(led->rt2x00dev->led_mcu_reg,
1180 EEPROM_FREQ_LED_POLARITY); 1180 EEPROM_FREQ_LED_POLARITY);
@@ -1741,7 +1741,7 @@ static void rt2800_config_3572bt_ant(struct rt2x00_dev *rt2x00dev)
1741 u8 led_ctrl, led_g_mode, led_r_mode; 1741 u8 led_ctrl, led_g_mode, led_r_mode;
1742 1742
1743 rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg); 1743 rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
1744 if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) { 1744 if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) {
1745 rt2x00_set_field32(&reg, GPIO_SWITCH_0, 1); 1745 rt2x00_set_field32(&reg, GPIO_SWITCH_0, 1);
1746 rt2x00_set_field32(&reg, GPIO_SWITCH_1, 1); 1746 rt2x00_set_field32(&reg, GPIO_SWITCH_1, 1);
1747 } else { 1747 } else {
@@ -1844,7 +1844,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
1844 rt2x00_has_cap_bt_coexist(rt2x00dev)) { 1844 rt2x00_has_cap_bt_coexist(rt2x00dev)) {
1845 rt2x00_set_field8(&r3, BBP3_RX_ADC, 1); 1845 rt2x00_set_field8(&r3, BBP3_RX_ADC, 1);
1846 rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 1846 rt2x00_set_field8(&r3, BBP3_RX_ANTENNA,
1847 rt2x00dev->curr_band == IEEE80211_BAND_5GHZ); 1847 rt2x00dev->curr_band == NL80211_BAND_5GHZ);
1848 rt2800_set_ant_diversity(rt2x00dev, ANTENNA_B); 1848 rt2800_set_ant_diversity(rt2x00dev, ANTENNA_B);
1849 } else { 1849 } else {
1850 rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 1); 1850 rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 1);
@@ -3451,7 +3451,7 @@ static int rt2800_get_gain_calibration_delta(struct rt2x00_dev *rt2x00dev)
3451 * Matching Delta value -4 -3 -2 -1 0 +1 +2 +3 +4 3451 * Matching Delta value -4 -3 -2 -1 0 +1 +2 +3 +4
3452 * Example TSSI bounds 0xF0 0xD0 0xB5 0xA0 0x88 0x45 0x25 0x15 0x00 3452 * Example TSSI bounds 0xF0 0xD0 0xB5 0xA0 0x88 0x45 0x25 0x15 0x00
3453 */ 3453 */
3454 if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) { 3454 if (rt2x00dev->curr_band == NL80211_BAND_2GHZ) {
3455 rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG1, &eeprom); 3455 rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG1, &eeprom);
3456 tssi_bounds[0] = rt2x00_get_field16(eeprom, 3456 tssi_bounds[0] = rt2x00_get_field16(eeprom,
3457 EEPROM_TSSI_BOUND_BG1_MINUS4); 3457 EEPROM_TSSI_BOUND_BG1_MINUS4);
@@ -3546,7 +3546,7 @@ static int rt2800_get_gain_calibration_delta(struct rt2x00_dev *rt2x00dev)
3546} 3546}
3547 3547
3548static int rt2800_get_txpower_bw_comp(struct rt2x00_dev *rt2x00dev, 3548static int rt2800_get_txpower_bw_comp(struct rt2x00_dev *rt2x00dev,
3549 enum ieee80211_band band) 3549 enum nl80211_band band)
3550{ 3550{
3551 u16 eeprom; 3551 u16 eeprom;
3552 u8 comp_en; 3552 u8 comp_en;
@@ -3562,7 +3562,7 @@ static int rt2800_get_txpower_bw_comp(struct rt2x00_dev *rt2x00dev,
3562 !test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags)) 3562 !test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags))
3563 return 0; 3563 return 0;
3564 3564
3565 if (band == IEEE80211_BAND_2GHZ) { 3565 if (band == NL80211_BAND_2GHZ) {
3566 comp_en = rt2x00_get_field16(eeprom, 3566 comp_en = rt2x00_get_field16(eeprom,
3567 EEPROM_TXPOWER_DELTA_ENABLE_2G); 3567 EEPROM_TXPOWER_DELTA_ENABLE_2G);
3568 if (comp_en) { 3568 if (comp_en) {
@@ -3611,7 +3611,7 @@ static int rt2800_get_txpower_reg_delta(struct rt2x00_dev *rt2x00dev,
3611} 3611}
3612 3612
3613static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b, 3613static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
3614 enum ieee80211_band band, int power_level, 3614 enum nl80211_band band, int power_level,
3615 u8 txpower, int delta) 3615 u8 txpower, int delta)
3616{ 3616{
3617 u16 eeprom; 3617 u16 eeprom;
@@ -3639,7 +3639,7 @@ static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
3639 rt2800_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER, 3639 rt2800_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER,
3640 &eeprom); 3640 &eeprom);
3641 3641
3642 if (band == IEEE80211_BAND_2GHZ) 3642 if (band == NL80211_BAND_2GHZ)
3643 eirp_txpower_criterion = rt2x00_get_field16(eeprom, 3643 eirp_txpower_criterion = rt2x00_get_field16(eeprom,
3644 EEPROM_EIRP_MAX_TX_POWER_2GHZ); 3644 EEPROM_EIRP_MAX_TX_POWER_2GHZ);
3645 else 3645 else
@@ -3686,7 +3686,7 @@ static void rt2800_config_txpower_rt3593(struct rt2x00_dev *rt2x00dev,
3686 u16 eeprom; 3686 u16 eeprom;
3687 u32 regs[TX_PWR_CFG_IDX_COUNT]; 3687 u32 regs[TX_PWR_CFG_IDX_COUNT];
3688 unsigned int offset; 3688 unsigned int offset;
3689 enum ieee80211_band band = chan->band; 3689 enum nl80211_band band = chan->band;
3690 int delta; 3690 int delta;
3691 int i; 3691 int i;
3692 3692
@@ -3697,7 +3697,7 @@ static void rt2800_config_txpower_rt3593(struct rt2x00_dev *rt2x00dev,
3697 /* calculate temperature compensation delta */ 3697 /* calculate temperature compensation delta */
3698 delta = rt2800_get_gain_calibration_delta(rt2x00dev); 3698 delta = rt2800_get_gain_calibration_delta(rt2x00dev);
3699 3699
3700 if (band == IEEE80211_BAND_5GHZ) 3700 if (band == NL80211_BAND_5GHZ)
3701 offset = 16; 3701 offset = 16;
3702 else 3702 else
3703 offset = 0; 3703 offset = 0;
@@ -4055,7 +4055,7 @@ static void rt2800_config_txpower_rt3593(struct rt2x00_dev *rt2x00dev,
4055 for (i = 0; i < TX_PWR_CFG_IDX_COUNT; i++) 4055 for (i = 0; i < TX_PWR_CFG_IDX_COUNT; i++)
4056 rt2x00_dbg(rt2x00dev, 4056 rt2x00_dbg(rt2x00dev,
4057 "band:%cGHz, BW:%c0MHz, TX_PWR_CFG_%d%s = %08lx\n", 4057 "band:%cGHz, BW:%c0MHz, TX_PWR_CFG_%d%s = %08lx\n",
4058 (band == IEEE80211_BAND_5GHZ) ? '5' : '2', 4058 (band == NL80211_BAND_5GHZ) ? '5' : '2',
4059 (test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags)) ? 4059 (test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags)) ?
4060 '4' : '2', 4060 '4' : '2',
4061 (i > TX_PWR_CFG_9_IDX) ? 4061 (i > TX_PWR_CFG_9_IDX) ?
@@ -4081,7 +4081,7 @@ static void rt2800_config_txpower_rt28xx(struct rt2x00_dev *rt2x00dev,
4081 u16 eeprom; 4081 u16 eeprom;
4082 u32 reg, offset; 4082 u32 reg, offset;
4083 int i, is_rate_b, delta, power_ctrl; 4083 int i, is_rate_b, delta, power_ctrl;
4084 enum ieee80211_band band = chan->band; 4084 enum nl80211_band band = chan->band;
4085 4085
4086 /* 4086 /*
4087 * Calculate HT40 compensation. For 40MHz we need to add or subtract 4087 * Calculate HT40 compensation. For 40MHz we need to add or subtract
@@ -4436,7 +4436,7 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
4436{ 4436{
4437 u8 vgc; 4437 u8 vgc;
4438 4438
4439 if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) { 4439 if (rt2x00dev->curr_band == NL80211_BAND_2GHZ) {
4440 if (rt2x00_rt(rt2x00dev, RT3070) || 4440 if (rt2x00_rt(rt2x00dev, RT3070) ||
4441 rt2x00_rt(rt2x00dev, RT3071) || 4441 rt2x00_rt(rt2x00dev, RT3071) ||
4442 rt2x00_rt(rt2x00dev, RT3090) || 4442 rt2x00_rt(rt2x00dev, RT3090) ||
@@ -4511,7 +4511,7 @@ void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
4511 case RT3572: 4511 case RT3572:
4512 case RT3593: 4512 case RT3593:
4513 if (qual->rssi > -65) { 4513 if (qual->rssi > -65) {
4514 if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) 4514 if (rt2x00dev->curr_band == NL80211_BAND_2GHZ)
4515 vgc += 0x20; 4515 vgc += 0x20;
4516 else 4516 else
4517 vgc += 0x10; 4517 vgc += 0x10;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index 6418620f95ff..f68d492129c6 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -38,6 +38,7 @@
38#include <linux/kfifo.h> 38#include <linux/kfifo.h>
39#include <linux/hrtimer.h> 39#include <linux/hrtimer.h>
40#include <linux/average.h> 40#include <linux/average.h>
41#include <linux/usb.h>
41 42
42#include <net/mac80211.h> 43#include <net/mac80211.h>
43 44
@@ -752,8 +753,8 @@ struct rt2x00_dev {
752 * IEEE80211 control structure. 753 * IEEE80211 control structure.
753 */ 754 */
754 struct ieee80211_hw *hw; 755 struct ieee80211_hw *hw;
755 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; 756 struct ieee80211_supported_band bands[NUM_NL80211_BANDS];
756 enum ieee80211_band curr_band; 757 enum nl80211_band curr_band;
757 int curr_freq; 758 int curr_freq;
758 759
759 /* 760 /*
@@ -1002,6 +1003,8 @@ struct rt2x00_dev {
1002 1003
1003 /* Extra TX headroom required for alignment purposes. */ 1004 /* Extra TX headroom required for alignment purposes. */
1004 unsigned int extra_tx_headroom; 1005 unsigned int extra_tx_headroom;
1006
1007 struct usb_anchor *anchor;
1005}; 1008};
1006 1009
1007struct rt2x00_bar_list_entry { 1010struct rt2x00_bar_list_entry {
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index 5639ed816813..4e0c5653054b 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -911,7 +911,7 @@ static void rt2x00lib_channel(struct ieee80211_channel *entry,
911 const int value) 911 const int value)
912{ 912{
913 /* XXX: this assumption about the band is wrong for 802.11j */ 913 /* XXX: this assumption about the band is wrong for 802.11j */
914 entry->band = channel <= 14 ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; 914 entry->band = channel <= 14 ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
915 entry->center_freq = ieee80211_channel_to_frequency(channel, 915 entry->center_freq = ieee80211_channel_to_frequency(channel,
916 entry->band); 916 entry->band);
917 entry->hw_value = value; 917 entry->hw_value = value;
@@ -975,13 +975,13 @@ static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev,
975 * Channels: 2.4 GHz 975 * Channels: 2.4 GHz
976 */ 976 */
977 if (spec->supported_bands & SUPPORT_BAND_2GHZ) { 977 if (spec->supported_bands & SUPPORT_BAND_2GHZ) {
978 rt2x00dev->bands[IEEE80211_BAND_2GHZ].n_channels = 14; 978 rt2x00dev->bands[NL80211_BAND_2GHZ].n_channels = 14;
979 rt2x00dev->bands[IEEE80211_BAND_2GHZ].n_bitrates = num_rates; 979 rt2x00dev->bands[NL80211_BAND_2GHZ].n_bitrates = num_rates;
980 rt2x00dev->bands[IEEE80211_BAND_2GHZ].channels = channels; 980 rt2x00dev->bands[NL80211_BAND_2GHZ].channels = channels;
981 rt2x00dev->bands[IEEE80211_BAND_2GHZ].bitrates = rates; 981 rt2x00dev->bands[NL80211_BAND_2GHZ].bitrates = rates;
982 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = 982 hw->wiphy->bands[NL80211_BAND_2GHZ] =
983 &rt2x00dev->bands[IEEE80211_BAND_2GHZ]; 983 &rt2x00dev->bands[NL80211_BAND_2GHZ];
984 memcpy(&rt2x00dev->bands[IEEE80211_BAND_2GHZ].ht_cap, 984 memcpy(&rt2x00dev->bands[NL80211_BAND_2GHZ].ht_cap,
985 &spec->ht, sizeof(spec->ht)); 985 &spec->ht, sizeof(spec->ht));
986 } 986 }
987 987
@@ -991,15 +991,15 @@ static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev,
991 * Channels: OFDM, UNII, HiperLAN2. 991 * Channels: OFDM, UNII, HiperLAN2.
992 */ 992 */
993 if (spec->supported_bands & SUPPORT_BAND_5GHZ) { 993 if (spec->supported_bands & SUPPORT_BAND_5GHZ) {
994 rt2x00dev->bands[IEEE80211_BAND_5GHZ].n_channels = 994 rt2x00dev->bands[NL80211_BAND_5GHZ].n_channels =
995 spec->num_channels - 14; 995 spec->num_channels - 14;
996 rt2x00dev->bands[IEEE80211_BAND_5GHZ].n_bitrates = 996 rt2x00dev->bands[NL80211_BAND_5GHZ].n_bitrates =
997 num_rates - 4; 997 num_rates - 4;
998 rt2x00dev->bands[IEEE80211_BAND_5GHZ].channels = &channels[14]; 998 rt2x00dev->bands[NL80211_BAND_5GHZ].channels = &channels[14];
999 rt2x00dev->bands[IEEE80211_BAND_5GHZ].bitrates = &rates[4]; 999 rt2x00dev->bands[NL80211_BAND_5GHZ].bitrates = &rates[4];
1000 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = 1000 hw->wiphy->bands[NL80211_BAND_5GHZ] =
1001 &rt2x00dev->bands[IEEE80211_BAND_5GHZ]; 1001 &rt2x00dev->bands[NL80211_BAND_5GHZ];
1002 memcpy(&rt2x00dev->bands[IEEE80211_BAND_5GHZ].ht_cap, 1002 memcpy(&rt2x00dev->bands[NL80211_BAND_5GHZ].ht_cap,
1003 &spec->ht, sizeof(spec->ht)); 1003 &spec->ht, sizeof(spec->ht));
1004 } 1004 }
1005 1005
@@ -1016,11 +1016,11 @@ static void rt2x00lib_remove_hw(struct rt2x00_dev *rt2x00dev)
1016 if (test_bit(DEVICE_STATE_REGISTERED_HW, &rt2x00dev->flags)) 1016 if (test_bit(DEVICE_STATE_REGISTERED_HW, &rt2x00dev->flags))
1017 ieee80211_unregister_hw(rt2x00dev->hw); 1017 ieee80211_unregister_hw(rt2x00dev->hw);
1018 1018
1019 if (likely(rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ])) { 1019 if (likely(rt2x00dev->hw->wiphy->bands[NL80211_BAND_2GHZ])) {
1020 kfree(rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels); 1020 kfree(rt2x00dev->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels);
1021 kfree(rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->bitrates); 1021 kfree(rt2x00dev->hw->wiphy->bands[NL80211_BAND_2GHZ]->bitrates);
1022 rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = NULL; 1022 rt2x00dev->hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
1023 rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = NULL; 1023 rt2x00dev->hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL;
1024 } 1024 }
1025 1025
1026 kfree(rt2x00dev->spec.channels_info); 1026 kfree(rt2x00dev->spec.channels_info);
@@ -1422,11 +1422,14 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
1422 cancel_work_sync(&rt2x00dev->intf_work); 1422 cancel_work_sync(&rt2x00dev->intf_work);
1423 cancel_delayed_work_sync(&rt2x00dev->autowakeup_work); 1423 cancel_delayed_work_sync(&rt2x00dev->autowakeup_work);
1424 cancel_work_sync(&rt2x00dev->sleep_work); 1424 cancel_work_sync(&rt2x00dev->sleep_work);
1425#ifdef CONFIG_RT2X00_LIB_USB
1425 if (rt2x00_is_usb(rt2x00dev)) { 1426 if (rt2x00_is_usb(rt2x00dev)) {
1427 usb_kill_anchored_urbs(rt2x00dev->anchor);
1426 hrtimer_cancel(&rt2x00dev->txstatus_timer); 1428 hrtimer_cancel(&rt2x00dev->txstatus_timer);
1427 cancel_work_sync(&rt2x00dev->rxdone_work); 1429 cancel_work_sync(&rt2x00dev->rxdone_work);
1428 cancel_work_sync(&rt2x00dev->txdone_work); 1430 cancel_work_sync(&rt2x00dev->txdone_work);
1429 } 1431 }
1432#endif
1430 if (rt2x00dev->workqueue) 1433 if (rt2x00dev->workqueue)
1431 destroy_workqueue(rt2x00dev->workqueue); 1434 destroy_workqueue(rt2x00dev->workqueue);
1432 1435
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
index 7627af6098eb..7cf26c6124d1 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
@@ -171,8 +171,11 @@ static void rt2x00usb_register_read_async_cb(struct urb *urb)
171{ 171{
172 struct rt2x00_async_read_data *rd = urb->context; 172 struct rt2x00_async_read_data *rd = urb->context;
173 if (rd->callback(rd->rt2x00dev, urb->status, le32_to_cpu(rd->reg))) { 173 if (rd->callback(rd->rt2x00dev, urb->status, le32_to_cpu(rd->reg))) {
174 if (usb_submit_urb(urb, GFP_ATOMIC) < 0) 174 usb_anchor_urb(urb, rd->rt2x00dev->anchor);
175 if (usb_submit_urb(urb, GFP_ATOMIC) < 0) {
176 usb_unanchor_urb(urb);
175 kfree(rd); 177 kfree(rd);
178 }
176 } else 179 } else
177 kfree(rd); 180 kfree(rd);
178} 181}
@@ -206,8 +209,11 @@ void rt2x00usb_register_read_async(struct rt2x00_dev *rt2x00dev,
206 usb_fill_control_urb(urb, usb_dev, usb_rcvctrlpipe(usb_dev, 0), 209 usb_fill_control_urb(urb, usb_dev, usb_rcvctrlpipe(usb_dev, 0),
207 (unsigned char *)(&rd->cr), &rd->reg, sizeof(rd->reg), 210 (unsigned char *)(&rd->cr), &rd->reg, sizeof(rd->reg),
208 rt2x00usb_register_read_async_cb, rd); 211 rt2x00usb_register_read_async_cb, rd);
209 if (usb_submit_urb(urb, GFP_ATOMIC) < 0) 212 usb_anchor_urb(urb, rt2x00dev->anchor);
213 if (usb_submit_urb(urb, GFP_ATOMIC) < 0) {
214 usb_unanchor_urb(urb);
210 kfree(rd); 215 kfree(rd);
216 }
211 usb_free_urb(urb); 217 usb_free_urb(urb);
212} 218}
213EXPORT_SYMBOL_GPL(rt2x00usb_register_read_async); 219EXPORT_SYMBOL_GPL(rt2x00usb_register_read_async);
@@ -313,8 +319,10 @@ static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void *data)
313 entry->skb->data, length, 319 entry->skb->data, length,
314 rt2x00usb_interrupt_txdone, entry); 320 rt2x00usb_interrupt_txdone, entry);
315 321
322 usb_anchor_urb(entry_priv->urb, rt2x00dev->anchor);
316 status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC); 323 status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
317 if (status) { 324 if (status) {
325 usb_unanchor_urb(entry_priv->urb);
318 if (status == -ENODEV) 326 if (status == -ENODEV)
319 clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); 327 clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
320 set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); 328 set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
@@ -402,8 +410,10 @@ static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void *data)
402 entry->skb->data, entry->skb->len, 410 entry->skb->data, entry->skb->len,
403 rt2x00usb_interrupt_rxdone, entry); 411 rt2x00usb_interrupt_rxdone, entry);
404 412
413 usb_anchor_urb(entry_priv->urb, rt2x00dev->anchor);
405 status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC); 414 status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
406 if (status) { 415 if (status) {
416 usb_unanchor_urb(entry_priv->urb);
407 if (status == -ENODEV) 417 if (status == -ENODEV)
408 clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); 418 clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
409 set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); 419 set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
@@ -818,6 +828,13 @@ int rt2x00usb_probe(struct usb_interface *usb_intf,
818 if (retval) 828 if (retval)
819 goto exit_free_reg; 829 goto exit_free_reg;
820 830
831 rt2x00dev->anchor = devm_kmalloc(&usb_dev->dev,
832 sizeof(struct usb_anchor),
833 GFP_KERNEL);
834 if (!rt2x00dev->anchor)
835 goto exit_free_reg;
836
837 init_usb_anchor(rt2x00dev->anchor);
821 return 0; 838 return 0;
822 839
823exit_free_reg: 840exit_free_reg:
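Beyond the band rename, the rt2x00 hunks above add URB anchoring: probe allocates a struct usb_anchor with devm_kmalloc() and init_usb_anchor(), every asynchronous URB is anchored just before submission and unanchored again if usb_submit_urb() fails, and rt2x00lib_remove_dev() calls usb_kill_anchored_urbs() so nothing can complete against a device that is being torn down. A reduced sketch of the submit-side pattern (illustrative; submit_anchored() is a made-up wrapper, the usb_* calls are the real ones used in rt2x00usb.c):

static int submit_anchored(struct rt2x00_dev *rt2x00dev, struct urb *urb)
{
	int status;

	usb_anchor_urb(urb, rt2x00dev->anchor);	/* track the in-flight URB */
	status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status)
		usb_unanchor_urb(urb);		/* never submitted, drop it again */

	return status;
}

On teardown the driver then only needs usb_kill_anchored_urbs(rt2x00dev->anchor) to cancel whatever is still outstanding, which is what the rt2x00dev.c hunk adds under CONFIG_RT2X00_LIB_USB.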
diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.c b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
index 24a3436ef952..03013eb2f642 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
@@ -252,9 +252,9 @@ static void rt61pci_brightness_set(struct led_classdev *led_cdev,
252 container_of(led_cdev, struct rt2x00_led, led_dev); 252 container_of(led_cdev, struct rt2x00_led, led_dev);
253 unsigned int enabled = brightness != LED_OFF; 253 unsigned int enabled = brightness != LED_OFF;
254 unsigned int a_mode = 254 unsigned int a_mode =
255 (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_5GHZ); 255 (enabled && led->rt2x00dev->curr_band == NL80211_BAND_5GHZ);
256 unsigned int bg_mode = 256 unsigned int bg_mode =
257 (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_2GHZ); 257 (enabled && led->rt2x00dev->curr_band == NL80211_BAND_2GHZ);
258 258
259 if (led->type == LED_TYPE_RADIO) { 259 if (led->type == LED_TYPE_RADIO) {
260 rt2x00_set_field16(&led->rt2x00dev->led_mcu_reg, 260 rt2x00_set_field16(&led->rt2x00dev->led_mcu_reg,
@@ -643,12 +643,12 @@ static void rt61pci_config_antenna_5x(struct rt2x00_dev *rt2x00dev,
643 case ANTENNA_HW_DIVERSITY: 643 case ANTENNA_HW_DIVERSITY:
644 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 2); 644 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 2);
645 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 645 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END,
646 (rt2x00dev->curr_band != IEEE80211_BAND_5GHZ)); 646 (rt2x00dev->curr_band != NL80211_BAND_5GHZ));
647 break; 647 break;
648 case ANTENNA_A: 648 case ANTENNA_A:
649 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1); 649 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1);
650 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0); 650 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0);
651 if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) 651 if (rt2x00dev->curr_band == NL80211_BAND_5GHZ)
652 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0); 652 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0);
653 else 653 else
654 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3); 654 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3);
@@ -657,7 +657,7 @@ static void rt61pci_config_antenna_5x(struct rt2x00_dev *rt2x00dev,
657 default: 657 default:
658 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1); 658 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1);
659 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0); 659 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0);
660 if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) 660 if (rt2x00dev->curr_band == NL80211_BAND_5GHZ)
661 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3); 661 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3);
662 else 662 else
663 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0); 663 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0);
@@ -808,7 +808,7 @@ static void rt61pci_config_ant(struct rt2x00_dev *rt2x00dev,
808 BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY || 808 BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY ||
809 ant->tx == ANTENNA_SW_DIVERSITY); 809 ant->tx == ANTENNA_SW_DIVERSITY);
810 810
811 if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) { 811 if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) {
812 sel = antenna_sel_a; 812 sel = antenna_sel_a;
813 lna = rt2x00_has_cap_external_lna_a(rt2x00dev); 813 lna = rt2x00_has_cap_external_lna_a(rt2x00dev);
814 } else { 814 } else {
@@ -822,9 +822,9 @@ static void rt61pci_config_ant(struct rt2x00_dev *rt2x00dev,
822 rt2x00mmio_register_read(rt2x00dev, PHY_CSR0, &reg); 822 rt2x00mmio_register_read(rt2x00dev, PHY_CSR0, &reg);
823 823
824 rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_BG, 824 rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_BG,
825 rt2x00dev->curr_band == IEEE80211_BAND_2GHZ); 825 rt2x00dev->curr_band == NL80211_BAND_2GHZ);
826 rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_A, 826 rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_A,
827 rt2x00dev->curr_band == IEEE80211_BAND_5GHZ); 827 rt2x00dev->curr_band == NL80211_BAND_5GHZ);
828 828
829 rt2x00mmio_register_write(rt2x00dev, PHY_CSR0, reg); 829 rt2x00mmio_register_write(rt2x00dev, PHY_CSR0, reg);
830 830
@@ -846,7 +846,7 @@ static void rt61pci_config_lna_gain(struct rt2x00_dev *rt2x00dev,
846 u16 eeprom; 846 u16 eeprom;
847 short lna_gain = 0; 847 short lna_gain = 0;
848 848
849 if (libconf->conf->chandef.chan->band == IEEE80211_BAND_2GHZ) { 849 if (libconf->conf->chandef.chan->band == NL80211_BAND_2GHZ) {
850 if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) 850 if (rt2x00_has_cap_external_lna_bg(rt2x00dev))
851 lna_gain += 14; 851 lna_gain += 14;
852 852
@@ -1048,7 +1048,7 @@ static void rt61pci_link_tuner(struct rt2x00_dev *rt2x00dev,
1048 /* 1048 /*
1049 * Determine r17 bounds. 1049 * Determine r17 bounds.
1050 */ 1050 */
1051 if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) { 1051 if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) {
1052 low_bound = 0x28; 1052 low_bound = 0x28;
1053 up_bound = 0x48; 1053 up_bound = 0x48;
1054 if (rt2x00_has_cap_external_lna_a(rt2x00dev)) { 1054 if (rt2x00_has_cap_external_lna_a(rt2x00dev)) {
@@ -2077,7 +2077,7 @@ static int rt61pci_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1)
2077 return 0; 2077 return 0;
2078 } 2078 }
2079 2079
2080 if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) { 2080 if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) {
2081 if (lna == 3 || lna == 2) 2081 if (lna == 3 || lna == 2)
2082 offset += 10; 2082 offset += 10;
2083 } 2083 }
diff --git a/drivers/net/wireless/ralink/rt2x00/rt73usb.c b/drivers/net/wireless/ralink/rt2x00/rt73usb.c
index 7bbc86931168..c1397a6d3cee 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt73usb.c
@@ -197,9 +197,9 @@ static void rt73usb_brightness_set(struct led_classdev *led_cdev,
197 container_of(led_cdev, struct rt2x00_led, led_dev); 197 container_of(led_cdev, struct rt2x00_led, led_dev);
198 unsigned int enabled = brightness != LED_OFF; 198 unsigned int enabled = brightness != LED_OFF;
199 unsigned int a_mode = 199 unsigned int a_mode =
200 (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_5GHZ); 200 (enabled && led->rt2x00dev->curr_band == NL80211_BAND_5GHZ);
201 unsigned int bg_mode = 201 unsigned int bg_mode =
202 (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_2GHZ); 202 (enabled && led->rt2x00dev->curr_band == NL80211_BAND_2GHZ);
203 203
204 if (led->type == LED_TYPE_RADIO) { 204 if (led->type == LED_TYPE_RADIO) {
205 rt2x00_set_field16(&led->rt2x00dev->led_mcu_reg, 205 rt2x00_set_field16(&led->rt2x00dev->led_mcu_reg,
@@ -593,13 +593,13 @@ static void rt73usb_config_antenna_5x(struct rt2x00_dev *rt2x00dev,
593 case ANTENNA_HW_DIVERSITY: 593 case ANTENNA_HW_DIVERSITY:
594 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 2); 594 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 2);
595 temp = !rt2x00_has_cap_frame_type(rt2x00dev) && 595 temp = !rt2x00_has_cap_frame_type(rt2x00dev) &&
596 (rt2x00dev->curr_band != IEEE80211_BAND_5GHZ); 596 (rt2x00dev->curr_band != NL80211_BAND_5GHZ);
597 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, temp); 597 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, temp);
598 break; 598 break;
599 case ANTENNA_A: 599 case ANTENNA_A:
600 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1); 600 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1);
601 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0); 601 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0);
602 if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) 602 if (rt2x00dev->curr_band == NL80211_BAND_5GHZ)
603 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0); 603 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0);
604 else 604 else
605 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3); 605 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3);
@@ -608,7 +608,7 @@ static void rt73usb_config_antenna_5x(struct rt2x00_dev *rt2x00dev,
608 default: 608 default:
609 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1); 609 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1);
610 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0); 610 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0);
611 if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) 611 if (rt2x00dev->curr_band == NL80211_BAND_5GHZ)
612 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3); 612 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3);
613 else 613 else
614 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0); 614 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0);
@@ -704,7 +704,7 @@ static void rt73usb_config_ant(struct rt2x00_dev *rt2x00dev,
704 BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY || 704 BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY ||
705 ant->tx == ANTENNA_SW_DIVERSITY); 705 ant->tx == ANTENNA_SW_DIVERSITY);
706 706
707 if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) { 707 if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) {
708 sel = antenna_sel_a; 708 sel = antenna_sel_a;
709 lna = rt2x00_has_cap_external_lna_a(rt2x00dev); 709 lna = rt2x00_has_cap_external_lna_a(rt2x00dev);
710 } else { 710 } else {
@@ -718,9 +718,9 @@ static void rt73usb_config_ant(struct rt2x00_dev *rt2x00dev,
718 rt2x00usb_register_read(rt2x00dev, PHY_CSR0, &reg); 718 rt2x00usb_register_read(rt2x00dev, PHY_CSR0, &reg);
719 719
720 rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_BG, 720 rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_BG,
721 (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ)); 721 (rt2x00dev->curr_band == NL80211_BAND_2GHZ));
722 rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_A, 722 rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_A,
723 (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ)); 723 (rt2x00dev->curr_band == NL80211_BAND_5GHZ));
724 724
725 rt2x00usb_register_write(rt2x00dev, PHY_CSR0, reg); 725 rt2x00usb_register_write(rt2x00dev, PHY_CSR0, reg);
726 726
@@ -736,7 +736,7 @@ static void rt73usb_config_lna_gain(struct rt2x00_dev *rt2x00dev,
736 u16 eeprom; 736 u16 eeprom;
737 short lna_gain = 0; 737 short lna_gain = 0;
738 738
739 if (libconf->conf->chandef.chan->band == IEEE80211_BAND_2GHZ) { 739 if (libconf->conf->chandef.chan->band == NL80211_BAND_2GHZ) {
740 if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) 740 if (rt2x00_has_cap_external_lna_bg(rt2x00dev))
741 lna_gain += 14; 741 lna_gain += 14;
742 742
@@ -923,7 +923,7 @@ static void rt73usb_link_tuner(struct rt2x00_dev *rt2x00dev,
923 /* 923 /*
924 * Determine r17 bounds. 924 * Determine r17 bounds.
925 */ 925 */
926 if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) { 926 if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) {
927 low_bound = 0x28; 927 low_bound = 0x28;
928 up_bound = 0x48; 928 up_bound = 0x48;
929 929
@@ -1657,7 +1657,7 @@ static int rt73usb_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1)
1657 return 0; 1657 return 0;
1658 } 1658 }
1659 1659
1660 if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) { 1660 if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) {
1661 if (rt2x00_has_cap_external_lna_a(rt2x00dev)) { 1661 if (rt2x00_has_cap_external_lna_a(rt2x00dev)) {
1662 if (lna == 3 || lna == 2) 1662 if (lna == 3 || lna == 2)
1663 offset += 10; 1663 offset += 10;
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
index a43a16fde59d..ba242d0160ec 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
@@ -526,7 +526,7 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
526 * ieee80211_generic_frame_duration 526 * ieee80211_generic_frame_duration
527 */ 527 */
528 duration = ieee80211_generic_frame_duration(dev, priv->vif, 528 duration = ieee80211_generic_frame_duration(dev, priv->vif,
529 IEEE80211_BAND_2GHZ, skb->len, 529 NL80211_BAND_2GHZ, skb->len,
530 ieee80211_get_tx_rate(dev, info)); 530 ieee80211_get_tx_rate(dev, info));
531 531
532 frame_duration = priv->ack_time + le16_to_cpu(duration); 532 frame_duration = priv->ack_time + le16_to_cpu(duration);
@@ -1529,7 +1529,7 @@ static void rtl8180_bss_info_changed(struct ieee80211_hw *dev,
1529 priv->ack_time = 1529 priv->ack_time =
1530 le16_to_cpu(ieee80211_generic_frame_duration(dev, 1530 le16_to_cpu(ieee80211_generic_frame_duration(dev,
1531 priv->vif, 1531 priv->vif,
1532 IEEE80211_BAND_2GHZ, 10, 1532 NL80211_BAND_2GHZ, 10,
1533 &priv->rates[0])) - 10; 1533 &priv->rates[0])) - 10;
1534 1534
1535 rtl8180_conf_erp(dev, info); 1535 rtl8180_conf_erp(dev, info);
@@ -1736,7 +1736,7 @@ static int rtl8180_probe(struct pci_dev *pdev,
1736 if (err) { 1736 if (err) {
1737 printk(KERN_ERR "%s (rtl8180): Cannot obtain PCI resources\n", 1737 printk(KERN_ERR "%s (rtl8180): Cannot obtain PCI resources\n",
1738 pci_name(pdev)); 1738 pci_name(pdev));
1739 return err; 1739 goto err_disable_dev;
1740 } 1740 }
1741 1741
1742 io_addr = pci_resource_start(pdev, 0); 1742 io_addr = pci_resource_start(pdev, 0);
@@ -1795,12 +1795,12 @@ static int rtl8180_probe(struct pci_dev *pdev,
1795 memcpy(priv->channels, rtl818x_channels, sizeof(rtl818x_channels)); 1795 memcpy(priv->channels, rtl818x_channels, sizeof(rtl818x_channels));
1796 memcpy(priv->rates, rtl818x_rates, sizeof(rtl818x_rates)); 1796 memcpy(priv->rates, rtl818x_rates, sizeof(rtl818x_rates));
1797 1797
1798 priv->band.band = IEEE80211_BAND_2GHZ; 1798 priv->band.band = NL80211_BAND_2GHZ;
1799 priv->band.channels = priv->channels; 1799 priv->band.channels = priv->channels;
1800 priv->band.n_channels = ARRAY_SIZE(rtl818x_channels); 1800 priv->band.n_channels = ARRAY_SIZE(rtl818x_channels);
1801 priv->band.bitrates = priv->rates; 1801 priv->band.bitrates = priv->rates;
1802 priv->band.n_bitrates = 4; 1802 priv->band.n_bitrates = 4;
1803 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band; 1803 dev->wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
1804 1804
1805 ieee80211_hw_set(dev, HOST_BROADCAST_PS_BUFFERING); 1805 ieee80211_hw_set(dev, HOST_BROADCAST_PS_BUFFERING);
1806 ieee80211_hw_set(dev, RX_INCLUDES_FCS); 1806 ieee80211_hw_set(dev, RX_INCLUDES_FCS);
@@ -1938,6 +1938,8 @@ static int rtl8180_probe(struct pci_dev *pdev,
1938 1938
1939 err_free_reg: 1939 err_free_reg:
1940 pci_release_regions(pdev); 1940 pci_release_regions(pdev);
1941
1942 err_disable_dev:
1941 pci_disable_device(pdev); 1943 pci_disable_device(pdev);
1942 return err; 1944 return err;
1943} 1945}
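The rtl8180 change is not only the band rename: the probe error path is also fixed so that a pci_request_regions() failure no longer returns with the PCI device still enabled; the error now unwinds through the new err_disable_dev label. The shape of the fix, reduced to the two steps involved (illustrative skeleton, not the full rtl8180_probe()):

static int probe_skeleton(struct pci_dev *pdev)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, "rtl8180");
	if (err)
		goto err_disable_dev;	/* was: return err; left the device enabled */

	/* ... rest of probe ... */
	return 0;

err_disable_dev:
	pci_disable_device(pdev);
	return err;
}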
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
index b7f72f9c7988..231f84db9ab0 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
@@ -1470,12 +1470,12 @@ static int rtl8187_probe(struct usb_interface *intf,
1470 memcpy(priv->rates, rtl818x_rates, sizeof(rtl818x_rates)); 1470 memcpy(priv->rates, rtl818x_rates, sizeof(rtl818x_rates));
1471 priv->map = (struct rtl818x_csr *)0xFF00; 1471 priv->map = (struct rtl818x_csr *)0xFF00;
1472 1472
1473 priv->band.band = IEEE80211_BAND_2GHZ; 1473 priv->band.band = NL80211_BAND_2GHZ;
1474 priv->band.channels = priv->channels; 1474 priv->band.channels = priv->channels;
1475 priv->band.n_channels = ARRAY_SIZE(rtl818x_channels); 1475 priv->band.n_channels = ARRAY_SIZE(rtl818x_channels);
1476 priv->band.bitrates = priv->rates; 1476 priv->band.bitrates = priv->rates;
1477 priv->band.n_bitrates = ARRAY_SIZE(rtl818x_rates); 1477 priv->band.n_bitrates = ARRAY_SIZE(rtl818x_rates);
1478 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band; 1478 dev->wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
1479 1479
1480 1480
1481 ieee80211_hw_set(dev, RX_INCLUDES_FCS); 1481 ieee80211_hw_set(dev, RX_INCLUDES_FCS);
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c
index abdff458b80f..db8433a9efe2 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c
@@ -91,33 +91,33 @@ static struct ieee80211_rate rtl8xxxu_rates[] = {
91}; 91};
92 92
93static struct ieee80211_channel rtl8xxxu_channels_2g[] = { 93static struct ieee80211_channel rtl8xxxu_channels_2g[] = {
94 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2412, 94 { .band = NL80211_BAND_2GHZ, .center_freq = 2412,
95 .hw_value = 1, .max_power = 30 }, 95 .hw_value = 1, .max_power = 30 },
96 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2417, 96 { .band = NL80211_BAND_2GHZ, .center_freq = 2417,
97 .hw_value = 2, .max_power = 30 }, 97 .hw_value = 2, .max_power = 30 },
98 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2422, 98 { .band = NL80211_BAND_2GHZ, .center_freq = 2422,
99 .hw_value = 3, .max_power = 30 }, 99 .hw_value = 3, .max_power = 30 },
100 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2427, 100 { .band = NL80211_BAND_2GHZ, .center_freq = 2427,
101 .hw_value = 4, .max_power = 30 }, 101 .hw_value = 4, .max_power = 30 },
102 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2432, 102 { .band = NL80211_BAND_2GHZ, .center_freq = 2432,
103 .hw_value = 5, .max_power = 30 }, 103 .hw_value = 5, .max_power = 30 },
104 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2437, 104 { .band = NL80211_BAND_2GHZ, .center_freq = 2437,
105 .hw_value = 6, .max_power = 30 }, 105 .hw_value = 6, .max_power = 30 },
106 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2442, 106 { .band = NL80211_BAND_2GHZ, .center_freq = 2442,
107 .hw_value = 7, .max_power = 30 }, 107 .hw_value = 7, .max_power = 30 },
108 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2447, 108 { .band = NL80211_BAND_2GHZ, .center_freq = 2447,
109 .hw_value = 8, .max_power = 30 }, 109 .hw_value = 8, .max_power = 30 },
110 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2452, 110 { .band = NL80211_BAND_2GHZ, .center_freq = 2452,
111 .hw_value = 9, .max_power = 30 }, 111 .hw_value = 9, .max_power = 30 },
112 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2457, 112 { .band = NL80211_BAND_2GHZ, .center_freq = 2457,
113 .hw_value = 10, .max_power = 30 }, 113 .hw_value = 10, .max_power = 30 },
114 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2462, 114 { .band = NL80211_BAND_2GHZ, .center_freq = 2462,
115 .hw_value = 11, .max_power = 30 }, 115 .hw_value = 11, .max_power = 30 },
116 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2467, 116 { .band = NL80211_BAND_2GHZ, .center_freq = 2467,
117 .hw_value = 12, .max_power = 30 }, 117 .hw_value = 12, .max_power = 30 },
118 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2472, 118 { .band = NL80211_BAND_2GHZ, .center_freq = 2472,
119 .hw_value = 13, .max_power = 30 }, 119 .hw_value = 13, .max_power = 30 },
120 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2484, 120 { .band = NL80211_BAND_2GHZ, .center_freq = 2484,
121 .hw_value = 14, .max_power = 30 } 121 .hw_value = 14, .max_power = 30 }
122}; 122};
123 123
@@ -1574,7 +1574,7 @@ static void rtl8723a_enable_rf(struct rtl8xxxu_priv *priv)
1574 val32 &= ~OFDM_RF_PATH_TX_MASK; 1574 val32 &= ~OFDM_RF_PATH_TX_MASK;
1575 if (priv->tx_paths == 2) 1575 if (priv->tx_paths == 2)
1576 val32 |= OFDM_RF_PATH_TX_A | OFDM_RF_PATH_TX_B; 1576 val32 |= OFDM_RF_PATH_TX_A | OFDM_RF_PATH_TX_B;
1577 else if (priv->rtlchip == 0x8192c || priv->rtlchip == 0x8191c) 1577 else if (priv->rtl_chip == RTL8192C || priv->rtl_chip == RTL8191C)
1578 val32 |= OFDM_RF_PATH_TX_B; 1578 val32 |= OFDM_RF_PATH_TX_B;
1579 else 1579 else
1580 val32 |= OFDM_RF_PATH_TX_A; 1580 val32 |= OFDM_RF_PATH_TX_A;
@@ -2199,11 +2199,11 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
2199 if (val32 & SYS_CFG_BT_FUNC) { 2199 if (val32 & SYS_CFG_BT_FUNC) {
2200 if (priv->chip_cut >= 3) { 2200 if (priv->chip_cut >= 3) {
2201 sprintf(priv->chip_name, "8723BU"); 2201 sprintf(priv->chip_name, "8723BU");
2202 priv->rtlchip = 0x8723b; 2202 priv->rtl_chip = RTL8723B;
2203 } else { 2203 } else {
2204 sprintf(priv->chip_name, "8723AU"); 2204 sprintf(priv->chip_name, "8723AU");
2205 priv->usb_interrupts = 1; 2205 priv->usb_interrupts = 1;
2206 priv->rtlchip = 0x8723a; 2206 priv->rtl_chip = RTL8723A;
2207 } 2207 }
2208 2208
2209 priv->rf_paths = 1; 2209 priv->rf_paths = 1;
@@ -2221,19 +2221,19 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
2221 } else if (val32 & SYS_CFG_TYPE_ID) { 2221 } else if (val32 & SYS_CFG_TYPE_ID) {
2222 bonding = rtl8xxxu_read32(priv, REG_HPON_FSM); 2222 bonding = rtl8xxxu_read32(priv, REG_HPON_FSM);
2223 bonding &= HPON_FSM_BONDING_MASK; 2223 bonding &= HPON_FSM_BONDING_MASK;
2224 if (priv->chip_cut >= 3) { 2224 if (priv->fops->has_s0s1) {
2225 if (bonding == HPON_FSM_BONDING_1T2R) { 2225 if (bonding == HPON_FSM_BONDING_1T2R) {
2226 sprintf(priv->chip_name, "8191EU"); 2226 sprintf(priv->chip_name, "8191EU");
2227 priv->rf_paths = 2; 2227 priv->rf_paths = 2;
2228 priv->rx_paths = 2; 2228 priv->rx_paths = 2;
2229 priv->tx_paths = 1; 2229 priv->tx_paths = 1;
2230 priv->rtlchip = 0x8191e; 2230 priv->rtl_chip = RTL8191E;
2231 } else { 2231 } else {
2232 sprintf(priv->chip_name, "8192EU"); 2232 sprintf(priv->chip_name, "8192EU");
2233 priv->rf_paths = 2; 2233 priv->rf_paths = 2;
2234 priv->rx_paths = 2; 2234 priv->rx_paths = 2;
2235 priv->tx_paths = 2; 2235 priv->tx_paths = 2;
2236 priv->rtlchip = 0x8192e; 2236 priv->rtl_chip = RTL8192E;
2237 } 2237 }
2238 } else if (bonding == HPON_FSM_BONDING_1T2R) { 2238 } else if (bonding == HPON_FSM_BONDING_1T2R) {
2239 sprintf(priv->chip_name, "8191CU"); 2239 sprintf(priv->chip_name, "8191CU");
@@ -2241,14 +2241,14 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
2241 priv->rx_paths = 2; 2241 priv->rx_paths = 2;
2242 priv->tx_paths = 1; 2242 priv->tx_paths = 1;
2243 priv->usb_interrupts = 1; 2243 priv->usb_interrupts = 1;
2244 priv->rtlchip = 0x8191c; 2244 priv->rtl_chip = RTL8191C;
2245 } else { 2245 } else {
2246 sprintf(priv->chip_name, "8192CU"); 2246 sprintf(priv->chip_name, "8192CU");
2247 priv->rf_paths = 2; 2247 priv->rf_paths = 2;
2248 priv->rx_paths = 2; 2248 priv->rx_paths = 2;
2249 priv->tx_paths = 2; 2249 priv->tx_paths = 2;
2250 priv->usb_interrupts = 1; 2250 priv->usb_interrupts = 1;
2251 priv->rtlchip = 0x8192c; 2251 priv->rtl_chip = RTL8192C;
2252 } 2252 }
2253 priv->has_wifi = 1; 2253 priv->has_wifi = 1;
2254 } else { 2254 } else {
@@ -2256,15 +2256,15 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
2256 priv->rf_paths = 1; 2256 priv->rf_paths = 1;
2257 priv->rx_paths = 1; 2257 priv->rx_paths = 1;
2258 priv->tx_paths = 1; 2258 priv->tx_paths = 1;
2259 priv->rtlchip = 0x8188c; 2259 priv->rtl_chip = RTL8188C;
2260 priv->usb_interrupts = 1; 2260 priv->usb_interrupts = 1;
2261 priv->has_wifi = 1; 2261 priv->has_wifi = 1;
2262 } 2262 }
2263 2263
2264 switch (priv->rtlchip) { 2264 switch (priv->rtl_chip) {
2265 case 0x8188e: 2265 case RTL8188E:
2266 case 0x8192e: 2266 case RTL8192E:
2267 case 0x8723b: 2267 case RTL8723B:
2268 switch (val32 & SYS_CFG_VENDOR_EXT_MASK) { 2268 switch (val32 & SYS_CFG_VENDOR_EXT_MASK) {
2269 case SYS_CFG_VENDOR_ID_TSMC: 2269 case SYS_CFG_VENDOR_ID_TSMC:
2270 sprintf(priv->chip_vendor, "TSMC"); 2270 sprintf(priv->chip_vendor, "TSMC");
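The rtl8xxxu hunks in this file replace the raw priv->rtlchip magic numbers (0x8188c, 0x8192c, 0x8723b, ...) with a priv->rtl_chip field compared against named RTL* constants, which makes the chip-type switch statements self-describing. A sketch of the kind of constants this relies on (illustrative only; the enum name is assumed and the numeric values are simply carried over from the old magic numbers for clarity, the real definitions live in rtl8xxxu.h):

enum rtl8xxxu_rtl_chip {	/* name and values assumed for illustration */
	RTL8188C = 0x8188c,
	RTL8188E = 0x8188e,
	RTL8191C = 0x8191c,
	RTL8192C = 0x8192c,
	RTL8191E = 0x8191e,
	RTL8192E = 0x8192e,
	RTL8723A = 0x8723a,
	RTL8723B = 0x8723b,
};

With this in place, checks such as priv->rtl_chip == RTL8723B in the hunks below carry the chip name directly instead of a hex literal.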
@@ -2814,7 +2814,7 @@ static int rtl8xxxu_start_firmware(struct rtl8xxxu_priv *priv)
2814 /* 2814 /*
2815 * Init H2C command 2815 * Init H2C command
2816 */ 2816 */
2817 if (priv->rtlchip == 0x8723b) 2817 if (priv->rtl_chip == RTL8723B)
2818 rtl8xxxu_write8(priv, REG_HMTFR, 0x0f); 2818 rtl8xxxu_write8(priv, REG_HMTFR, 0x0f);
2819exit: 2819exit:
2820 return ret; 2820 return ret;
@@ -2997,7 +2997,7 @@ static int rtl8192cu_load_firmware(struct rtl8xxxu_priv *priv)
2997 2997
2998 if (!priv->vendor_umc) 2998 if (!priv->vendor_umc)
2999 fw_name = "rtlwifi/rtl8192cufw_TMSC.bin"; 2999 fw_name = "rtlwifi/rtl8192cufw_TMSC.bin";
3000 else if (priv->chip_cut || priv->rtlchip == 0x8192c) 3000 else if (priv->chip_cut || priv->rtl_chip == RTL8192C)
3001 fw_name = "rtlwifi/rtl8192cufw_B.bin"; 3001 fw_name = "rtlwifi/rtl8192cufw_B.bin";
3002 else 3002 else
3003 fw_name = "rtlwifi/rtl8192cufw_A.bin"; 3003 fw_name = "rtlwifi/rtl8192cufw_A.bin";
@@ -3108,7 +3108,7 @@ rtl8xxxu_init_mac(struct rtl8xxxu_priv *priv, struct rtl8xxxu_reg8val *array)
3108 } 3108 }
3109 } 3109 }
3110 3110
3111 if (priv->rtlchip != 0x8723b) 3111 if (priv->rtl_chip != RTL8723B)
3112 rtl8xxxu_write8(priv, REG_MAX_AGGR_NUM, 0x0a); 3112 rtl8xxxu_write8(priv, REG_MAX_AGGR_NUM, 0x0a);
3113 3113
3114 return 0; 3114 return 0;
@@ -3154,7 +3154,7 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv)
3154 * addresses, which is initialized here. Do we need this? 3154 * addresses, which is initialized here. Do we need this?
3155 */ 3155 */
3156 3156
3157 if (priv->rtlchip == 0x8723b) { 3157 if (priv->rtl_chip == RTL8723B) {
3158 val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC); 3158 val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
3159 val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB | 3159 val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB |
3160 SYS_FUNC_DIO_RF; 3160 SYS_FUNC_DIO_RF;
@@ -3176,7 +3176,7 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv)
3176 rtl8xxxu_write16(priv, REG_SYS_FUNC, val16); 3176 rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
3177 } 3177 }
3178 3178
3179 if (priv->rtlchip != 0x8723b) { 3179 if (priv->rtl_chip != RTL8723B) {
3180 /* AFE_XTAL_RF_GATE (bit 14) if addressing as 32 bit register */ 3180 /* AFE_XTAL_RF_GATE (bit 14) if addressing as 32 bit register */
3181 val32 = rtl8xxxu_read32(priv, REG_AFE_XTAL_CTRL); 3181 val32 = rtl8xxxu_read32(priv, REG_AFE_XTAL_CTRL);
3182 val32 &= ~AFE_XTAL_RF_GATE; 3182 val32 &= ~AFE_XTAL_RF_GATE;
@@ -3193,7 +3193,7 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv)
3193 rtl8xxxu_init_phy_regs(priv, rtl8188ru_phy_1t_highpa_table); 3193 rtl8xxxu_init_phy_regs(priv, rtl8188ru_phy_1t_highpa_table);
3194 else if (priv->tx_paths == 2) 3194 else if (priv->tx_paths == 2)
3195 rtl8xxxu_init_phy_regs(priv, rtl8192cu_phy_2t_init_table); 3195 rtl8xxxu_init_phy_regs(priv, rtl8192cu_phy_2t_init_table);
3196 else if (priv->rtlchip == 0x8723b) { 3196 else if (priv->rtl_chip == RTL8723B) {
3197 /* 3197 /*
3198 * Why? 3198 * Why?
3199 */ 3199 */
@@ -3204,7 +3204,7 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv)
3204 rtl8xxxu_init_phy_regs(priv, rtl8723a_phy_1t_init_table); 3204 rtl8xxxu_init_phy_regs(priv, rtl8723a_phy_1t_init_table);
3205 3205
3206 3206
3207 if (priv->rtlchip == 0x8188c && priv->hi_pa && 3207 if (priv->rtl_chip == RTL8188C && priv->hi_pa &&
3208 priv->vendor_umc && priv->chip_cut == 1) 3208 priv->vendor_umc && priv->chip_cut == 1)
3209 rtl8xxxu_write8(priv, REG_OFDM0_AGC_PARM1 + 2, 0x50); 3209 rtl8xxxu_write8(priv, REG_OFDM0_AGC_PARM1 + 2, 0x50);
3210 3210
@@ -3266,7 +3266,7 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv)
3266 rtl8xxxu_write32(priv, REG_TX_TO_TX, val32); 3266 rtl8xxxu_write32(priv, REG_TX_TO_TX, val32);
3267 } 3267 }
3268 3268
3269 if (priv->rtlchip == 0x8723b) 3269 if (priv->rtl_chip == RTL8723B)
3270 rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_8723bu_table); 3270 rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_8723bu_table);
3271 else if (priv->hi_pa) 3271 else if (priv->hi_pa)
3272 rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_highpa_table); 3272 rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_highpa_table);
@@ -3283,7 +3283,7 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv)
3283 rtl8xxxu_write32(priv, REG_MAC_PHY_CTRL, val32); 3283 rtl8xxxu_write32(priv, REG_MAC_PHY_CTRL, val32);
3284 } 3284 }
3285 3285
3286 if (priv->rtlchip != 0x8723bu) { 3286 if (priv->rtl_chip != RTL8723B && priv->rtl_chip != RTL8192E) {
3287 ldoa15 = LDOA15_ENABLE | LDOA15_OBUF; 3287 ldoa15 = LDOA15_ENABLE | LDOA15_OBUF;
3288 ldov12d = LDOV12D_ENABLE | BIT(2) | (2 << LDOV12D_VADJ_SHIFT); 3288 ldov12d = LDOV12D_ENABLE | BIT(2) | (2 << LDOV12D_VADJ_SHIFT);
3289 ldohci12 = 0x57; 3289 ldohci12 = 0x57;
@@ -5955,7 +5955,7 @@ static int rtl8192cu_power_on(struct rtl8xxxu_priv *priv)
5955 /* 5955 /*
5956 * Workaround for 8188RU LNA power leakage problem. 5956 * Workaround for 8188RU LNA power leakage problem.
5957 */ 5957 */
5958 if (priv->rtlchip == 0x8188c && priv->hi_pa) { 5958 if (priv->rtl_chip == RTL8188C && priv->hi_pa) {
5959 val32 = rtl8xxxu_read32(priv, REG_FPGA0_XCD_RF_PARM); 5959 val32 = rtl8xxxu_read32(priv, REG_FPGA0_XCD_RF_PARM);
5960 val32 &= ~BIT(1); 5960 val32 &= ~BIT(1);
5961 rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_PARM, val32); 5961 rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_PARM, val32);
@@ -6020,7 +6020,7 @@ static void rtl8xxxu_power_off(struct rtl8xxxu_priv *priv)
6020 /* 6020 /*
6021 * Workaround for 8188RU LNA power leakage problem. 6021 * Workaround for 8188RU LNA power leakage problem.
6022 */ 6022 */
6023 if (priv->rtlchip == 0x8188c && priv->hi_pa) { 6023 if (priv->rtl_chip == RTL8188C && priv->hi_pa) {
6024 val32 = rtl8xxxu_read32(priv, REG_FPGA0_XCD_RF_PARM); 6024 val32 = rtl8xxxu_read32(priv, REG_FPGA0_XCD_RF_PARM);
6025 val32 |= BIT(1); 6025 val32 |= BIT(1);
6026 rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_PARM, val32); 6026 rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_PARM, val32);
@@ -6313,7 +6313,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
6313 * Presumably this is for 8188EU as well 6313 * Presumably this is for 8188EU as well
6314 * Enable TX report and TX report timer 6314 * Enable TX report and TX report timer
6315 */ 6315 */
6316 if (priv->rtlchip == 0x8723bu) { 6316 if (priv->rtl_chip == RTL8723B) {
6317 val8 = rtl8xxxu_read8(priv, REG_TX_REPORT_CTRL); 6317 val8 = rtl8xxxu_read8(priv, REG_TX_REPORT_CTRL);
6318 val8 |= TX_REPORT_CTRL_TIMER_ENABLE; 6318 val8 |= TX_REPORT_CTRL_TIMER_ENABLE;
6319 rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL, val8); 6319 rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL, val8);
@@ -6340,9 +6340,9 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
6340 6340
6341 /* Solve too many protocol error on USB bus */ 6341 /* Solve too many protocol error on USB bus */
6342 /* Can't do this for 8188/8192 UMC A cut parts */ 6342 /* Can't do this for 8188/8192 UMC A cut parts */
6343 if (priv->rtlchip == 0x8723a || 6343 if (priv->rtl_chip == RTL8723A ||
6344 ((priv->rtlchip == 0x8192c || priv->rtlchip == 0x8191c || 6344 ((priv->rtl_chip == RTL8192C || priv->rtl_chip == RTL8191C ||
6345 priv->rtlchip == 0x8188c) && 6345 priv->rtl_chip == RTL8188C) &&
6346 (priv->chip_cut || !priv->vendor_umc))) { 6346 (priv->chip_cut || !priv->vendor_umc))) {
6347 rtl8xxxu_write8(priv, 0xfe40, 0xe6); 6347 rtl8xxxu_write8(priv, 0xfe40, 0xe6);
6348 rtl8xxxu_write8(priv, 0xfe41, 0x94); 6348 rtl8xxxu_write8(priv, 0xfe41, 0x94);
@@ -6361,7 +6361,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
6361 rtl8xxxu_write8(priv, 0xfe42, 0x80); 6361 rtl8xxxu_write8(priv, 0xfe42, 0x80);
6362 } 6362 }
6363 6363
6364 if (priv->rtlchip == 0x8192e) { 6364 if (priv->rtl_chip == RTL8192E) {
6365 rtl8xxxu_write32(priv, REG_HIMR0, 0x00); 6365 rtl8xxxu_write32(priv, REG_HIMR0, 0x00);
6366 rtl8xxxu_write32(priv, REG_HIMR1, 0x00); 6366 rtl8xxxu_write32(priv, REG_HIMR1, 0x00);
6367 } 6367 }
@@ -6369,7 +6369,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
6369 if (priv->fops->phy_init_antenna_selection) 6369 if (priv->fops->phy_init_antenna_selection)
6370 priv->fops->phy_init_antenna_selection(priv); 6370 priv->fops->phy_init_antenna_selection(priv);
6371 6371
6372 if (priv->rtlchip == 0x8723b) 6372 if (priv->rtl_chip == RTL8723B)
6373 ret = rtl8xxxu_init_mac(priv, rtl8723b_mac_init_table); 6373 ret = rtl8xxxu_init_mac(priv, rtl8723b_mac_init_table);
6374 else 6374 else
6375 ret = rtl8xxxu_init_mac(priv, rtl8723a_mac_init_table); 6375 ret = rtl8xxxu_init_mac(priv, rtl8723a_mac_init_table);
@@ -6383,12 +6383,12 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
6383 if (ret) 6383 if (ret)
6384 goto exit; 6384 goto exit;
6385 6385
6386 switch(priv->rtlchip) { 6386 switch(priv->rtl_chip) {
6387 case 0x8723a: 6387 case RTL8723A:
6388 rftable = rtl8723au_radioa_1t_init_table; 6388 rftable = rtl8723au_radioa_1t_init_table;
6389 ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A); 6389 ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
6390 break; 6390 break;
6391 case 0x8723b: 6391 case RTL8723B:
6392 rftable = rtl8723bu_radioa_1t_init_table; 6392 rftable = rtl8723bu_radioa_1t_init_table;
6393 ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A); 6393 ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
6394 /* 6394 /*
@@ -6399,18 +6399,18 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
6399 msleep(200); 6399 msleep(200);
6400 rtl8xxxu_write_rfreg(priv, RF_A, 0xb0, 0xdffe0); 6400 rtl8xxxu_write_rfreg(priv, RF_A, 0xb0, 0xdffe0);
6401 break; 6401 break;
6402 case 0x8188c: 6402 case RTL8188C:
6403 if (priv->hi_pa) 6403 if (priv->hi_pa)
6404 rftable = rtl8188ru_radioa_1t_highpa_table; 6404 rftable = rtl8188ru_radioa_1t_highpa_table;
6405 else 6405 else
6406 rftable = rtl8192cu_radioa_1t_init_table; 6406 rftable = rtl8192cu_radioa_1t_init_table;
6407 ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A); 6407 ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
6408 break; 6408 break;
6409 case 0x8191c: 6409 case RTL8191C:
6410 rftable = rtl8192cu_radioa_1t_init_table; 6410 rftable = rtl8192cu_radioa_1t_init_table;
6411 ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A); 6411 ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
6412 break; 6412 break;
6413 case 0x8192c: 6413 case RTL8192C:
6414 rftable = rtl8192cu_radioa_2t_init_table; 6414 rftable = rtl8192cu_radioa_2t_init_table;
6415 ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A); 6415 ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
6416 if (ret) 6416 if (ret)
@@ -6428,7 +6428,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
6428 /* 6428 /*
6429 * Chip specific quirks 6429 * Chip specific quirks
6430 */ 6430 */
6431 if (priv->rtlchip == 0x8723a) { 6431 if (priv->rtl_chip == RTL8723A) {
6432 /* Fix USB interface interference issue */ 6432 /* Fix USB interface interference issue */
6433 rtl8xxxu_write8(priv, 0xfe40, 0xe0); 6433 rtl8xxxu_write8(priv, 0xfe40, 0xe0);
6434 rtl8xxxu_write8(priv, 0xfe41, 0x8d); 6434 rtl8xxxu_write8(priv, 0xfe41, 0x8d);
@@ -6468,7 +6468,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
6468 */ 6468 */
6469 val8 = TX_TOTAL_PAGE_NUM + 1; 6469 val8 = TX_TOTAL_PAGE_NUM + 1;
6470 6470
6471 if (priv->rtlchip == 0x8723b) 6471 if (priv->rtl_chip == RTL8723B)
6472 val8 -= 1; 6472 val8 -= 1;
6473 6473
6474 rtl8xxxu_write8(priv, REG_TXPKTBUF_BCNQ_BDNY, val8); 6474 rtl8xxxu_write8(priv, REG_TXPKTBUF_BCNQ_BDNY, val8);
@@ -6484,7 +6484,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
6484 goto exit; 6484 goto exit;
6485 6485
6486 /* RFSW Control - clear bit 14 ?? */ 6486 /* RFSW Control - clear bit 14 ?? */
6487 if (priv->rtlchip != 0x8723b) 6487 if (priv->rtl_chip != RTL8723B)
6488 rtl8xxxu_write32(priv, REG_FPGA0_TX_INFO, 0x00000003); 6488 rtl8xxxu_write32(priv, REG_FPGA0_TX_INFO, 0x00000003);
6489 /* 0x07000760 */ 6489 /* 0x07000760 */
6490 val32 = FPGA0_RF_TRSW | FPGA0_RF_TRSWB | FPGA0_RF_ANTSW | 6490 val32 = FPGA0_RF_TRSW | FPGA0_RF_TRSWB | FPGA0_RF_ANTSW |
@@ -6501,14 +6501,14 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
6501 /* 6501 /*
6502 * Set RX page boundary 6502 * Set RX page boundary
6503 */ 6503 */
6504 if (priv->rtlchip == 0x8723b) 6504 if (priv->rtl_chip == RTL8723B)
6505 rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, 0x3f7f); 6505 rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, 0x3f7f);
6506 else 6506 else
6507 rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, 0x27ff); 6507 rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, 0x27ff);
6508 /* 6508 /*
6509 * Transfer page size is always 128 6509 * Transfer page size is always 128
6510 */ 6510 */
6511 if (priv->rtlchip == 0x8723b) 6511 if (priv->rtl_chip == RTL8723B)
6512 val8 = (PBP_PAGE_SIZE_256 << PBP_PAGE_SIZE_RX_SHIFT) | 6512 val8 = (PBP_PAGE_SIZE_256 << PBP_PAGE_SIZE_RX_SHIFT) |
6513 (PBP_PAGE_SIZE_256 << PBP_PAGE_SIZE_TX_SHIFT); 6513 (PBP_PAGE_SIZE_256 << PBP_PAGE_SIZE_TX_SHIFT);
6514 else 6514 else
@@ -6600,7 +6600,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
6600 /* 6600 /*
6601 * Initialize burst parameters 6601 * Initialize burst parameters
6602 */ 6602 */
6603 if (priv->rtlchip == 0x8723b) { 6603 if (priv->rtl_chip == RTL8723B) {
6604 /* 6604 /*
6605 * For USB high speed set 512B packets 6605 * For USB high speed set 512B packets
6606 */ 6606 */
@@ -6682,7 +6682,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
6682 val8 = ((30000 + NAV_UPPER_UNIT - 1) / NAV_UPPER_UNIT); 6682 val8 = ((30000 + NAV_UPPER_UNIT - 1) / NAV_UPPER_UNIT);
6683 rtl8xxxu_write8(priv, REG_NAV_UPPER, val8); 6683 rtl8xxxu_write8(priv, REG_NAV_UPPER, val8);
6684 6684
6685 if (priv->rtlchip == 0x8723a) { 6685 if (priv->rtl_chip == RTL8723A) {
6686 /* 6686 /*
6687 * 2011/03/09 MH debug only, UMC-B cut pass 2500 S5 test, 6687 * 2011/03/09 MH debug only, UMC-B cut pass 2500 S5 test,
6688 * but we need to find root cause. 6688 * but we need to find root cause.
@@ -7014,7 +7014,7 @@ static u32 rtl8xxxu_queue_select(struct ieee80211_hw *hw, struct sk_buff *skb)
7014 * format. The descriptor checksum is still only calculated over the 7014 * format. The descriptor checksum is still only calculated over the
7015 * initial 32 bytes of the descriptor! 7015 * initial 32 bytes of the descriptor!
7016 */ 7016 */
7017static void rtl8xxxu_calc_tx_desc_csum(struct rtl8723au_tx_desc *tx_desc) 7017static void rtl8xxxu_calc_tx_desc_csum(struct rtl8xxxu_txdesc32 *tx_desc)
7018{ 7018{
7019 __le16 *ptr = (__le16 *)tx_desc; 7019 __le16 *ptr = (__le16 *)tx_desc;
7020 u16 csum = 0; 7020 u16 csum = 0;
@@ -7026,7 +7026,7 @@ static void rtl8xxxu_calc_tx_desc_csum(struct rtl8723au_tx_desc *tx_desc)
7026 */ 7026 */
7027 tx_desc->csum = cpu_to_le16(0); 7027 tx_desc->csum = cpu_to_le16(0);
7028 7028
7029 for (i = 0; i < (sizeof(struct rtl8723au_tx_desc) / sizeof(u16)); i++) 7029 for (i = 0; i < (sizeof(struct rtl8xxxu_txdesc32) / sizeof(u16)); i++)
7030 csum = csum ^ le16_to_cpu(ptr[i]); 7030 csum = csum ^ le16_to_cpu(ptr[i]);
7031 7031
7032 tx_desc->csum |= cpu_to_le16(csum); 7032 tx_desc->csum |= cpu_to_le16(csum);
@@ -7164,8 +7164,8 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
7164 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 7164 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
7165 struct ieee80211_rate *tx_rate = ieee80211_get_tx_rate(hw, tx_info); 7165 struct ieee80211_rate *tx_rate = ieee80211_get_tx_rate(hw, tx_info);
7166 struct rtl8xxxu_priv *priv = hw->priv; 7166 struct rtl8xxxu_priv *priv = hw->priv;
7167 struct rtl8723au_tx_desc *tx_desc; 7167 struct rtl8xxxu_txdesc32 *tx_desc;
7168 struct rtl8723bu_tx_desc *tx_desc40; 7168 struct rtl8xxxu_txdesc40 *tx_desc40;
7169 struct rtl8xxxu_tx_urb *tx_urb; 7169 struct rtl8xxxu_tx_urb *tx_urb;
7170 struct ieee80211_sta *sta = NULL; 7170 struct ieee80211_sta *sta = NULL;
7171 struct ieee80211_vif *vif = tx_info->control.vif; 7171 struct ieee80211_vif *vif = tx_info->control.vif;
@@ -7210,7 +7210,7 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
7210 if (control && control->sta) 7210 if (control && control->sta)
7211 sta = control->sta; 7211 sta = control->sta;
7212 7212
7213 tx_desc = (struct rtl8723au_tx_desc *)skb_push(skb, tx_desc_size); 7213 tx_desc = (struct rtl8xxxu_txdesc32 *)skb_push(skb, tx_desc_size);
7214 7214
7215 memset(tx_desc, 0, tx_desc_size); 7215 memset(tx_desc, 0, tx_desc_size);
7216 tx_desc->pkt_size = cpu_to_le16(pktlen); 7216 tx_desc->pkt_size = cpu_to_le16(pktlen);
@@ -7267,37 +7267,35 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
7267 tx_desc->txdw5 |= cpu_to_le32(0x0001ff00); 7267 tx_desc->txdw5 |= cpu_to_le32(0x0001ff00);
7268 7268
7269 tx_desc->txdw3 = 7269 tx_desc->txdw3 =
7270 cpu_to_le32((u32)seq_number << TXDESC_SEQ_SHIFT_8723A); 7270 cpu_to_le32((u32)seq_number << TXDESC32_SEQ_SHIFT);
7271 7271
7272 if (ampdu_enable) 7272 if (ampdu_enable)
7273 tx_desc->txdw1 |= cpu_to_le32(TXDESC_AGG_ENABLE_8723A); 7273 tx_desc->txdw1 |= cpu_to_le32(TXDESC32_AGG_ENABLE);
7274 else 7274 else
7275 tx_desc->txdw1 |= cpu_to_le32(TXDESC_AGG_BREAK_8723A); 7275 tx_desc->txdw1 |= cpu_to_le32(TXDESC32_AGG_BREAK);
7276 7276
7277 if (ieee80211_is_mgmt(hdr->frame_control)) { 7277 if (ieee80211_is_mgmt(hdr->frame_control)) {
7278 tx_desc->txdw5 = cpu_to_le32(tx_rate->hw_value); 7278 tx_desc->txdw5 = cpu_to_le32(tx_rate->hw_value);
7279 tx_desc->txdw4 |= 7279 tx_desc->txdw4 |=
7280 cpu_to_le32(TXDESC_USE_DRIVER_RATE_8723A); 7280 cpu_to_le32(TXDESC32_USE_DRIVER_RATE);
7281 tx_desc->txdw5 |= 7281 tx_desc->txdw5 |=
7282 cpu_to_le32(6 << 7282 cpu_to_le32(6 << TXDESC32_RETRY_LIMIT_SHIFT);
7283 TXDESC_RETRY_LIMIT_SHIFT_8723A);
7284 tx_desc->txdw5 |= 7283 tx_desc->txdw5 |=
7285 cpu_to_le32(TXDESC_RETRY_LIMIT_ENABLE_8723A); 7284 cpu_to_le32(TXDESC32_RETRY_LIMIT_ENABLE);
7286 } 7285 }
7287 7286
7288 if (ieee80211_is_data_qos(hdr->frame_control)) 7287 if (ieee80211_is_data_qos(hdr->frame_control))
7289 tx_desc->txdw4 |= cpu_to_le32(TXDESC_QOS_8723A); 7288 tx_desc->txdw4 |= cpu_to_le32(TXDESC32_QOS);
7290 7289
7291 if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE || 7290 if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE ||
7292 (sta && vif && vif->bss_conf.use_short_preamble)) 7291 (sta && vif && vif->bss_conf.use_short_preamble))
7293 tx_desc->txdw4 |= 7292 tx_desc->txdw4 |= cpu_to_le32(TXDESC32_SHORT_PREAMBLE);
7294 cpu_to_le32(TXDESC_SHORT_PREAMBLE_8723A);
7295 7293
7296 if (rate_flag & IEEE80211_TX_RC_SHORT_GI || 7294 if (rate_flag & IEEE80211_TX_RC_SHORT_GI ||
7297 (ieee80211_is_data_qos(hdr->frame_control) && 7295 (ieee80211_is_data_qos(hdr->frame_control) &&
7298 sta && sta->ht_cap.cap & 7296 sta && sta->ht_cap.cap &
7299 (IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20))) { 7297 (IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20))) {
7300 tx_desc->txdw5 |= cpu_to_le32(TXDESC_SHORT_GI); 7298 tx_desc->txdw5 |= cpu_to_le32(TXDESC32_SHORT_GI);
7301 } 7299 }
7302 7300
7303 if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) { 7301 if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
@@ -7307,46 +7305,43 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
7307 */ 7305 */
7308 tx_desc->txdw4 |= 7306 tx_desc->txdw4 |=
7309 cpu_to_le32(DESC_RATE_24M << 7307 cpu_to_le32(DESC_RATE_24M <<
7310 TXDESC_RTS_RATE_SHIFT_8723A); 7308 TXDESC32_RTS_RATE_SHIFT);
7311 tx_desc->txdw4 |= 7309 tx_desc->txdw4 |=
7312 cpu_to_le32(TXDESC_RTS_CTS_ENABLE_8723A); 7310 cpu_to_le32(TXDESC32_RTS_CTS_ENABLE);
7313 tx_desc->txdw4 |= 7311 tx_desc->txdw4 |= cpu_to_le32(TXDESC32_HW_RTS_ENABLE);
7314 cpu_to_le32(TXDESC_HW_RTS_ENABLE_8723A);
7315 } 7312 }
7316 } else { 7313 } else {
7317 tx_desc40 = (struct rtl8723bu_tx_desc *)tx_desc; 7314 tx_desc40 = (struct rtl8xxxu_txdesc40 *)tx_desc;
7318 7315
7319 tx_desc40->txdw4 = cpu_to_le32(rate); 7316 tx_desc40->txdw4 = cpu_to_le32(rate);
7320 if (ieee80211_is_data(hdr->frame_control)) { 7317 if (ieee80211_is_data(hdr->frame_control)) {
7321 tx_desc->txdw4 |= 7318 tx_desc->txdw4 |=
7322 cpu_to_le32(0x1f << 7319 cpu_to_le32(0x1f <<
7323 TXDESC_DATA_RATE_FB_SHIFT_8723B); 7320 TXDESC40_DATA_RATE_FB_SHIFT);
7324 } 7321 }
7325 7322
7326 tx_desc40->txdw9 = 7323 tx_desc40->txdw9 =
7327 cpu_to_le32((u32)seq_number << TXDESC_SEQ_SHIFT_8723B); 7324 cpu_to_le32((u32)seq_number << TXDESC40_SEQ_SHIFT);
7328 7325
7329 if (ampdu_enable) 7326 if (ampdu_enable)
7330 tx_desc40->txdw2 |= 7327 tx_desc40->txdw2 |= cpu_to_le32(TXDESC40_AGG_ENABLE);
7331 cpu_to_le32(TXDESC_AGG_ENABLE_8723B);
7332 else 7328 else
7333 tx_desc40->txdw2 |= cpu_to_le32(TXDESC_AGG_BREAK_8723B); 7329 tx_desc40->txdw2 |= cpu_to_le32(TXDESC40_AGG_BREAK);
7334 7330
7335 if (ieee80211_is_mgmt(hdr->frame_control)) { 7331 if (ieee80211_is_mgmt(hdr->frame_control)) {
7336 tx_desc40->txdw4 = cpu_to_le32(tx_rate->hw_value); 7332 tx_desc40->txdw4 = cpu_to_le32(tx_rate->hw_value);
7337 tx_desc40->txdw3 |= 7333 tx_desc40->txdw3 |=
7338 cpu_to_le32(TXDESC_USE_DRIVER_RATE_8723B); 7334 cpu_to_le32(TXDESC40_USE_DRIVER_RATE);
7339 tx_desc40->txdw4 |= 7335 tx_desc40->txdw4 |=
7340 cpu_to_le32(6 << 7336 cpu_to_le32(6 << TXDESC40_RETRY_LIMIT_SHIFT);
7341 TXDESC_RETRY_LIMIT_SHIFT_8723B);
7342 tx_desc40->txdw4 |= 7337 tx_desc40->txdw4 |=
7343 cpu_to_le32(TXDESC_RETRY_LIMIT_ENABLE_8723B); 7338 cpu_to_le32(TXDESC40_RETRY_LIMIT_ENABLE);
7344 } 7339 }
7345 7340
7346 if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE || 7341 if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE ||
7347 (sta && vif && vif->bss_conf.use_short_preamble)) 7342 (sta && vif && vif->bss_conf.use_short_preamble))
7348 tx_desc40->txdw5 |= 7343 tx_desc40->txdw5 |=
7349 cpu_to_le32(TXDESC_SHORT_PREAMBLE_8723B); 7344 cpu_to_le32(TXDESC40_SHORT_PREAMBLE);
7350 7345
7351 if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) { 7346 if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
7352 /* 7347 /*
@@ -7355,11 +7350,9 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
7355 */ 7350 */
7356 tx_desc->txdw4 |= 7351 tx_desc->txdw4 |=
7357 cpu_to_le32(DESC_RATE_24M << 7352 cpu_to_le32(DESC_RATE_24M <<
7358 TXDESC_RTS_RATE_SHIFT_8723B); 7353 TXDESC40_RTS_RATE_SHIFT);
7359 tx_desc->txdw3 |= 7354 tx_desc->txdw3 |= cpu_to_le32(TXDESC40_RTS_CTS_ENABLE);
7360 cpu_to_le32(TXDESC_RTS_CTS_ENABLE_8723B); 7355 tx_desc->txdw3 |= cpu_to_le32(TXDESC40_HW_RTS_ENABLE);
7361 tx_desc->txdw3 |=
7362 cpu_to_le32(TXDESC_HW_RTS_ENABLE_8723B);
7363 } 7356 }
7364 } 7357 }
7365 7358
@@ -8385,7 +8378,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
8385 dev_info(&udev->dev, "Enabling HT_20_40 on the 2.4GHz band\n"); 8378 dev_info(&udev->dev, "Enabling HT_20_40 on the 2.4GHz band\n");
8386 sband->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; 8379 sband->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
8387 } 8380 }
8388 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband; 8381 hw->wiphy->bands[NL80211_BAND_2GHZ] = sband;
8389 8382
8390 hw->wiphy->rts_threshold = 2347; 8383 hw->wiphy->rts_threshold = 2347;
8391 8384
@@ -8454,7 +8447,7 @@ static struct rtl8xxxu_fileops rtl8723au_fops = {
8454 .writeN_block_size = 1024, 8447 .writeN_block_size = 1024,
8455 .mbox_ext_reg = REG_HMBOX_EXT_0, 8448 .mbox_ext_reg = REG_HMBOX_EXT_0,
8456 .mbox_ext_width = 2, 8449 .mbox_ext_width = 2,
8457 .tx_desc_size = sizeof(struct rtl8723au_tx_desc), 8450 .tx_desc_size = sizeof(struct rtl8xxxu_txdesc32),
8458 .adda_1t_init = 0x0b1b25a0, 8451 .adda_1t_init = 0x0b1b25a0,
8459 .adda_1t_path_on = 0x0bdb25a0, 8452 .adda_1t_path_on = 0x0bdb25a0,
8460 .adda_2t_path_on_a = 0x04db25a4, 8453 .adda_2t_path_on_a = 0x04db25a4,
@@ -8482,7 +8475,7 @@ static struct rtl8xxxu_fileops rtl8723bu_fops = {
8482 .writeN_block_size = 1024, 8475 .writeN_block_size = 1024,
8483 .mbox_ext_reg = REG_HMBOX_EXT0_8723B, 8476 .mbox_ext_reg = REG_HMBOX_EXT0_8723B,
8484 .mbox_ext_width = 4, 8477 .mbox_ext_width = 4,
8485 .tx_desc_size = sizeof(struct rtl8723bu_tx_desc), 8478 .tx_desc_size = sizeof(struct rtl8xxxu_txdesc40),
8486 .has_s0s1 = 1, 8479 .has_s0s1 = 1,
8487 .adda_1t_init = 0x01c00014, 8480 .adda_1t_init = 0x01c00014,
8488 .adda_1t_path_on = 0x01c00014, 8481 .adda_1t_path_on = 0x01c00014,
@@ -8510,7 +8503,7 @@ static struct rtl8xxxu_fileops rtl8192cu_fops = {
8510 .writeN_block_size = 128, 8503 .writeN_block_size = 128,
8511 .mbox_ext_reg = REG_HMBOX_EXT_0, 8504 .mbox_ext_reg = REG_HMBOX_EXT_0,
8512 .mbox_ext_width = 2, 8505 .mbox_ext_width = 2,
8513 .tx_desc_size = sizeof(struct rtl8723au_tx_desc), 8506 .tx_desc_size = sizeof(struct rtl8xxxu_txdesc32),
8514 .adda_1t_init = 0x0b1b25a0, 8507 .adda_1t_init = 0x0b1b25a0,
8515 .adda_1t_path_on = 0x0bdb25a0, 8508 .adda_1t_path_on = 0x0bdb25a0,
8516 .adda_2t_path_on_a = 0x04db25a4, 8509 .adda_2t_path_on_a = 0x04db25a4,
@@ -8532,12 +8525,12 @@ static struct rtl8xxxu_fileops rtl8192eu_fops = {
8532 .enable_rf = rtl8723b_enable_rf, 8525 .enable_rf = rtl8723b_enable_rf,
8533 .disable_rf = rtl8723b_disable_rf, 8526 .disable_rf = rtl8723b_disable_rf,
8534 .set_tx_power = rtl8723b_set_tx_power, 8527 .set_tx_power = rtl8723b_set_tx_power,
8535 .update_rate_mask = rtl8723au_update_rate_mask, 8528 .update_rate_mask = rtl8723bu_update_rate_mask,
8536 .report_connect = rtl8723au_report_connect, 8529 .report_connect = rtl8723bu_report_connect,
8537 .writeN_block_size = 128, 8530 .writeN_block_size = 128,
8538 .mbox_ext_reg = REG_HMBOX_EXT0_8723B, 8531 .mbox_ext_reg = REG_HMBOX_EXT0_8723B,
8539 .mbox_ext_width = 4, 8532 .mbox_ext_width = 4,
8540 .tx_desc_size = sizeof(struct rtl8723au_tx_desc), 8533 .tx_desc_size = sizeof(struct rtl8xxxu_txdesc40),
8541 .has_s0s1 = 1, 8534 .has_s0s1 = 1,
8542 .adda_1t_init = 0x0fc01616, 8535 .adda_1t_init = 0x0fc01616,
8543 .adda_1t_path_on = 0x0fc01616, 8536 .adda_1t_path_on = 0x0fc01616,
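The rtl8xxxu.c hunks above rename the TX descriptor structures (rtl8723au_tx_desc becomes rtl8xxxu_txdesc32, rtl8723bu_tx_desc becomes rtl8xxxu_txdesc40) and switch the chip checks from bare hex values to rtl_chip enum comparisons; as the retained comment above rtl8xxxu_calc_tx_desc_csum notes, the descriptor checksum is still computed only over the initial 32 bytes. Below is a small standalone C sketch of that checksum, not the driver code itself: the struct mirrors the 32-byte txdesc32 layout with plain fixed-width types instead of __le types, and main() is only a demonstration harness.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mirrors the 32-byte rtl8xxxu_txdesc32 layout for illustration only. */
struct txdesc32_model {
	uint16_t pkt_size;
	uint8_t  pkt_offset;
	uint8_t  txdw0;
	uint32_t txdw1;
	uint32_t txdw2;
	uint32_t txdw3;
	uint32_t txdw4;
	uint32_t txdw5;
	uint32_t txdw6;
	uint16_t csum;
	uint16_t txdw7;
};

static uint16_t txdesc32_csum(struct txdesc32_model *desc)
{
	const uint8_t *p = (const uint8_t *)desc;
	uint16_t csum = 0;
	size_t i;

	desc->csum = 0;	/* the checksum is computed with the field zeroed */

	/* XOR the 32-byte descriptor as little-endian 16-bit words */
	for (i = 0; i + 1 < sizeof(*desc); i += 2)
		csum ^= (uint16_t)(p[i] | (p[i + 1] << 8));

	return csum;
}

int main(void)
{
	struct txdesc32_model desc;

	memset(&desc, 0, sizeof(desc));
	desc.pkt_size = 1500;
	desc.txdw3 = 42u << 16;	/* e.g. a sequence number in word 3 */

	desc.csum = txdesc32_csum(&desc);
	printf("descriptor checksum: 0x%04x\n", (unsigned)desc.csum);
	return 0;
}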
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index 7b73654e1368..455e1122dbb5 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -65,6 +65,30 @@
65#define EFUSE_BT_MAP_LEN_8723A 1024 65#define EFUSE_BT_MAP_LEN_8723A 1024
66#define EFUSE_MAX_WORD_UNIT 4 66#define EFUSE_MAX_WORD_UNIT 4
67 67
68enum rtl8xxxu_rtl_chip {
69 RTL8192S = 0x81920,
70 RTL8191S = 0x81910,
71 RTL8192C = 0x8192c,
72 RTL8191C = 0x8191c,
73 RTL8188C = 0x8188c,
74 RTL8188R = 0x81889,
75 RTL8192D = 0x8192d,
76 RTL8723A = 0x8723a,
77 RTL8188E = 0x8188e,
78 RTL8812 = 0x88120,
79 RTL8821 = 0x88210,
80 RTL8192E = 0x8192e,
81 RTL8191E = 0x8191e,
82 RTL8723B = 0x8723b,
83 RTL8814A = 0x8814a,
84 RTL8881A = 0x8881a,
85 RTL8821B = 0x8821b,
86 RTL8822B = 0x8822b,
87 RTL8703B = 0x8703b,
88 RTL8195A = 0x8195a,
89 RTL8188F = 0x8188f
90};
91
68enum rtl8xxxu_rx_type { 92enum rtl8xxxu_rx_type {
69 RX_TYPE_DATA_PKT = 0, 93 RX_TYPE_DATA_PKT = 0,
70 RX_TYPE_C2H = 1, 94 RX_TYPE_C2H = 1,
@@ -332,7 +356,7 @@ struct rtl8723bu_rx_desc {
332 __le32 tsfl; 356 __le32 tsfl;
333}; 357};
334 358
335struct rtl8723au_tx_desc { 359struct rtl8xxxu_txdesc32 {
336 __le16 pkt_size; 360 __le16 pkt_size;
337 u8 pkt_offset; 361 u8 pkt_offset;
338 u8 txdw0; 362 u8 txdw0;
@@ -346,7 +370,7 @@ struct rtl8723au_tx_desc {
346 __le16 txdw7; 370 __le16 txdw7;
347}; 371};
348 372
349struct rtl8723bu_tx_desc { 373struct rtl8xxxu_txdesc40 {
350 __le16 pkt_size; 374 __le16 pkt_size;
351 u8 pkt_offset; 375 u8 pkt_offset;
352 u8 txdw0; 376 u8 txdw0;
@@ -422,10 +446,10 @@ struct rtl8723bu_tx_desc {
422 * aggregation enable and break respectively. For 8723bu, bits 0-7 are macid. 446 * aggregation enable and break respectively. For 8723bu, bits 0-7 are macid.
423 */ 447 */
424#define TXDESC_PKT_OFFSET_SZ 0 448#define TXDESC_PKT_OFFSET_SZ 0
425#define TXDESC_AGG_ENABLE_8723A BIT(5) 449#define TXDESC32_AGG_ENABLE BIT(5)
426#define TXDESC_AGG_BREAK_8723A BIT(6) 450#define TXDESC32_AGG_BREAK BIT(6)
427#define TXDESC_MACID_SHIFT_8723B 0 451#define TXDESC40_MACID_SHIFT 0
428#define TXDESC_MACID_MASK_8723B 0x00f0 452#define TXDESC40_MACID_MASK 0x00f0
429#define TXDESC_QUEUE_SHIFT 8 453#define TXDESC_QUEUE_SHIFT 8
430#define TXDESC_QUEUE_MASK 0x1f00 454#define TXDESC_QUEUE_MASK 0x1f00
431#define TXDESC_QUEUE_BK 0x2 455#define TXDESC_QUEUE_BK 0x2
@@ -437,9 +461,9 @@ struct rtl8723bu_tx_desc {
437#define TXDESC_QUEUE_MGNT 0x12 461#define TXDESC_QUEUE_MGNT 0x12
438#define TXDESC_QUEUE_CMD 0x13 462#define TXDESC_QUEUE_CMD 0x13
439#define TXDESC_QUEUE_MAX (TXDESC_QUEUE_CMD + 1) 463#define TXDESC_QUEUE_MAX (TXDESC_QUEUE_CMD + 1)
440#define TXDESC_RDG_NAV_EXT_8723B BIT(13) 464#define TXDESC40_RDG_NAV_EXT BIT(13)
441#define TXDESC_LSIG_TXOP_ENABLE_8723B BIT(14) 465#define TXDESC40_LSIG_TXOP_ENABLE BIT(14)
442#define TXDESC_PIFS_8723B BIT(15) 466#define TXDESC40_PIFS BIT(15)
443 467
444#define DESC_RATE_ID_SHIFT 16 468#define DESC_RATE_ID_SHIFT 16
445#define DESC_RATE_ID_MASK 0xf 469#define DESC_RATE_ID_MASK 0xf
@@ -451,71 +475,71 @@ struct rtl8723bu_tx_desc {
451#define TXDESC_HWPC BIT(31) 475#define TXDESC_HWPC BIT(31)
452 476
453/* Word 2 */ 477/* Word 2 */
454#define TXDESC_PAID_SHIFT_8723B 0 478#define TXDESC40_PAID_SHIFT 0
455#define TXDESC_PAID_MASK_8723B 0x1ff 479#define TXDESC40_PAID_MASK 0x1ff
456#define TXDESC_CCA_RTS_SHIFT_8723B 10 480#define TXDESC40_CCA_RTS_SHIFT 10
457#define TXDESC_CCA_RTS_MASK_8723B 0xc00 481#define TXDESC40_CCA_RTS_MASK 0xc00
458#define TXDESC_AGG_ENABLE_8723B BIT(12) 482#define TXDESC40_AGG_ENABLE BIT(12)
459#define TXDESC_RDG_ENABLE_8723B BIT(13) 483#define TXDESC40_RDG_ENABLE BIT(13)
460#define TXDESC_AGG_BREAK_8723B BIT(16) 484#define TXDESC40_AGG_BREAK BIT(16)
461#define TXDESC_MORE_FRAG_8723B BIT(17) 485#define TXDESC40_MORE_FRAG BIT(17)
462#define TXDESC_RAW_8723B BIT(18) 486#define TXDESC40_RAW BIT(18)
463#define TXDESC_ACK_REPORT_8723A BIT(19) 487#define TXDESC32_ACK_REPORT BIT(19)
464#define TXDESC_SPE_RPT_8723B BIT(19) 488#define TXDESC40_SPE_RPT BIT(19)
465#define TXDESC_AMPDU_DENSITY_SHIFT 20 489#define TXDESC_AMPDU_DENSITY_SHIFT 20
466#define TXDESC_BT_INT_8723B BIT(23) 490#define TXDESC40_BT_INT BIT(23)
467#define TXDESC_GID_8723B BIT(24) 491#define TXDESC40_GID_SHIFT 24
468 492
469/* Word 3 */ 493/* Word 3 */
470#define TXDESC_USE_DRIVER_RATE_8723B BIT(8) 494#define TXDESC40_USE_DRIVER_RATE BIT(8)
471#define TXDESC_CTS_SELF_ENABLE_8723B BIT(11) 495#define TXDESC40_CTS_SELF_ENABLE BIT(11)
472#define TXDESC_RTS_CTS_ENABLE_8723B BIT(12) 496#define TXDESC40_RTS_CTS_ENABLE BIT(12)
473#define TXDESC_HW_RTS_ENABLE_8723B BIT(13) 497#define TXDESC40_HW_RTS_ENABLE BIT(13)
474#define TXDESC_SEQ_SHIFT_8723A 16 498#define TXDESC32_SEQ_SHIFT 16
475#define TXDESC_SEQ_MASK_8723A 0x0fff0000 499#define TXDESC32_SEQ_MASK 0x0fff0000
476 500
477/* Word 4 */ 501/* Word 4 */
478#define TXDESC_RTS_RATE_SHIFT_8723A 0 502#define TXDESC32_RTS_RATE_SHIFT 0
479#define TXDESC_RTS_RATE_MASK_8723A 0x3f 503#define TXDESC32_RTS_RATE_MASK 0x3f
480#define TXDESC_QOS_8723A BIT(6) 504#define TXDESC32_QOS BIT(6)
481#define TXDESC_HW_SEQ_ENABLE_8723A BIT(7) 505#define TXDESC32_HW_SEQ_ENABLE BIT(7)
482#define TXDESC_USE_DRIVER_RATE_8723A BIT(8) 506#define TXDESC32_USE_DRIVER_RATE BIT(8)
483#define TXDESC_DISABLE_DATA_FB BIT(10) 507#define TXDESC_DISABLE_DATA_FB BIT(10)
484#define TXDESC_CTS_SELF_ENABLE_8723A BIT(11) 508#define TXDESC32_CTS_SELF_ENABLE BIT(11)
485#define TXDESC_RTS_CTS_ENABLE_8723A BIT(12) 509#define TXDESC32_RTS_CTS_ENABLE BIT(12)
486#define TXDESC_HW_RTS_ENABLE_8723A BIT(13) 510#define TXDESC32_HW_RTS_ENABLE BIT(13)
487#define TXDESC_PRIME_CH_OFF_LOWER BIT(20) 511#define TXDESC_PRIME_CH_OFF_LOWER BIT(20)
488#define TXDESC_PRIME_CH_OFF_UPPER BIT(21) 512#define TXDESC_PRIME_CH_OFF_UPPER BIT(21)
489#define TXDESC_SHORT_PREAMBLE_8723A BIT(24) 513#define TXDESC32_SHORT_PREAMBLE BIT(24)
490#define TXDESC_DATA_BW BIT(25) 514#define TXDESC_DATA_BW BIT(25)
491#define TXDESC_RTS_DATA_BW BIT(27) 515#define TXDESC_RTS_DATA_BW BIT(27)
492#define TXDESC_RTS_PRIME_CH_OFF_LOWER BIT(28) 516#define TXDESC_RTS_PRIME_CH_OFF_LOWER BIT(28)
493#define TXDESC_RTS_PRIME_CH_OFF_UPPER BIT(29) 517#define TXDESC_RTS_PRIME_CH_OFF_UPPER BIT(29)
494#define TXDESC_DATA_RATE_FB_SHIFT_8723B 8 518#define TXDESC40_DATA_RATE_FB_SHIFT 8
495#define TXDESC_DATA_RATE_FB_MASK_8723B 0x00001f00 519#define TXDESC40_DATA_RATE_FB_MASK 0x00001f00
496#define TXDESC_RETRY_LIMIT_ENABLE_8723B BIT(17) 520#define TXDESC40_RETRY_LIMIT_ENABLE BIT(17)
497#define TXDESC_RETRY_LIMIT_SHIFT_8723B 18 521#define TXDESC40_RETRY_LIMIT_SHIFT 18
498#define TXDESC_RETRY_LIMIT_MASK_8723B 0x00fc0000 522#define TXDESC40_RETRY_LIMIT_MASK 0x00fc0000
499#define TXDESC_RTS_RATE_SHIFT_8723B 24 523#define TXDESC40_RTS_RATE_SHIFT 24
500#define TXDESC_RTS_RATE_MASK_8723B 0x3f000000 524#define TXDESC40_RTS_RATE_MASK 0x3f000000
501 525
502/* Word 5 */ 526/* Word 5 */
503#define TXDESC_SHORT_PREAMBLE_8723B BIT(4) 527#define TXDESC40_SHORT_PREAMBLE BIT(4)
504#define TXDESC_SHORT_GI BIT(6) 528#define TXDESC32_SHORT_GI BIT(6)
505#define TXDESC_CCX_TAG BIT(7) 529#define TXDESC_CCX_TAG BIT(7)
506#define TXDESC_RETRY_LIMIT_ENABLE_8723A BIT(17) 530#define TXDESC32_RETRY_LIMIT_ENABLE BIT(17)
507#define TXDESC_RETRY_LIMIT_SHIFT_8723A 18 531#define TXDESC32_RETRY_LIMIT_SHIFT 18
508#define TXDESC_RETRY_LIMIT_MASK_8723A 0x00fc0000 532#define TXDESC32_RETRY_LIMIT_MASK 0x00fc0000
509 533
510/* Word 6 */ 534/* Word 6 */
511#define TXDESC_MAX_AGG_SHIFT 11 535#define TXDESC_MAX_AGG_SHIFT 11
512 536
513/* Word 8 */ 537/* Word 8 */
514#define TXDESC_HW_SEQ_ENABLE_8723B BIT(15) 538#define TXDESC40_HW_SEQ_ENABLE BIT(15)
515 539
516/* Word 9 */ 540/* Word 9 */
517#define TXDESC_SEQ_SHIFT_8723B 12 541#define TXDESC40_SEQ_SHIFT 12
518#define TXDESC_SEQ_MASK_8723B 0x00fff000 542#define TXDESC40_SEQ_MASK 0x00fff000
519 543
520struct phy_rx_agc_info { 544struct phy_rx_agc_info {
521#ifdef __LITTLE_ENDIAN 545#ifdef __LITTLE_ENDIAN
@@ -1236,7 +1260,7 @@ struct rtl8xxxu_priv {
1236 u32 mac_backup[RTL8XXXU_MAC_REGS]; 1260 u32 mac_backup[RTL8XXXU_MAC_REGS];
1237 u32 bb_backup[RTL8XXXU_BB_REGS]; 1261 u32 bb_backup[RTL8XXXU_BB_REGS];
1238 u32 bb_recovery_backup[RTL8XXXU_BB_REGS]; 1262 u32 bb_recovery_backup[RTL8XXXU_BB_REGS];
1239 u32 rtlchip; 1263 enum rtl8xxxu_rtl_chip rtl_chip;
1240 u8 pi_enabled:1; 1264 u8 pi_enabled:1;
1241 u8 int_buf[USB_INTR_CONTENT_LENGTH]; 1265 u8 int_buf[USB_INTR_CONTENT_LENGTH];
1242}; 1266};
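The rtl8xxxu.h hunk above introduces enum rtl8xxxu_rtl_chip and turns the priv->rtlchip u32 into priv->rtl_chip, which is what lets the .c file switch on named chips instead of comparing against hex constants. A minimal userspace sketch of that dispatch pattern follows; the enum values are copied from the hunk, while demo_priv and the string table names are reduced stand-ins for the driver's real types and RF init tables.

#include <stdio.h>

enum rtl8xxxu_rtl_chip {
	RTL8192C = 0x8192c,
	RTL8191C = 0x8191c,
	RTL8188C = 0x8188c,
	RTL8723A = 0x8723a,
	RTL8192E = 0x8192e,
	RTL8723B = 0x8723b,
};

struct demo_priv {
	enum rtl8xxxu_rtl_chip rtl_chip;
	int hi_pa;	/* high-power-amplifier variant (8188RU) */
};

/* Pick an RF init table name by chip, mirroring the switch in the .c hunk. */
static const char *pick_radioa_table(const struct demo_priv *priv)
{
	switch (priv->rtl_chip) {
	case RTL8723A:
		return "rtl8723au_radioa_1t_init_table";
	case RTL8723B:
		return "rtl8723bu_radioa_1t_init_table";
	case RTL8188C:
		return priv->hi_pa ? "rtl8188ru_radioa_1t_highpa_table"
				   : "rtl8192cu_radioa_1t_init_table";
	case RTL8191C:
		return "rtl8192cu_radioa_1t_init_table";
	case RTL8192C:
		return "rtl8192cu_radioa_2t_init_table";
	default:
		return "(unhandled chip)";
	}
}

int main(void)
{
	struct demo_priv priv = { .rtl_chip = RTL8188C, .hi_pa = 1 };

	printf("RF init table: %s\n", pick_radioa_table(&priv));
	return 0;
}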
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
index e545e849f5a3..ade42fe7e742 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
@@ -417,13 +417,20 @@
417 417
418/* spec version 11 */ 418/* spec version 11 */
419/* 0x0400 ~ 0x047F Protocol Configuration */ 419/* 0x0400 ~ 0x047F Protocol Configuration */
420#define REG_VOQ_INFORMATION 0x0400 420/* 8192c, 8192d */
421#define REG_VIQ_INFORMATION 0x0404 421#define REG_VOQ_INFO 0x0400
422#define REG_BEQ_INFORMATION 0x0408 422#define REG_VIQ_INFO 0x0404
423#define REG_BKQ_INFORMATION 0x040c 423#define REG_BEQ_INFO 0x0408
424#define REG_MGQ_INFORMATION 0x0410 424#define REG_BKQ_INFO 0x040c
425#define REG_HGQ_INFORMATION 0x0414 425/* 8188e, 8723a, 8812a, 8821a, 8192e, 8723b */
426#define REG_BCNQ_INFORMATION 0x0418 426#define REG_Q0_INFO 0x400
427#define REG_Q1_INFO 0x404
428#define REG_Q2_INFO 0x408
429#define REG_Q3_INFO 0x40c
430
431#define REG_MGQ_INFO 0x0410
432#define REG_HGQ_INFO 0x0414
433#define REG_BCNQ_INFO 0x0418
427 434
428#define REG_CPU_MGQ_INFORMATION 0x041c 435#define REG_CPU_MGQ_INFORMATION 0x041c
429#define REG_FWHW_TXQ_CTRL 0x0420 436#define REG_FWHW_TXQ_CTRL 0x0420
@@ -494,6 +501,9 @@
494#define REG_DATA_SUBCHANNEL 0x0483 501#define REG_DATA_SUBCHANNEL 0x0483
495/* 8723au */ 502/* 8723au */
496#define REG_INIDATA_RATE_SEL 0x0484 503#define REG_INIDATA_RATE_SEL 0x0484
504/* MACID_SLEEP_1/3 for 8723b, 8192e, 8812a, 8821a */
505#define REG_MACID_SLEEP_3_8732B 0x0484
506#define REG_MACID_SLEEP_1_8732B 0x0488
497 507
498#define REG_POWER_STATUS 0x04a4 508#define REG_POWER_STATUS 0x04a4
499#define REG_POWER_STAGE1 0x04b4 509#define REG_POWER_STAGE1 0x04b4
@@ -508,6 +518,13 @@
508#define REG_RTS_MAX_AGGR_NUM 0x04cb 518#define REG_RTS_MAX_AGGR_NUM 0x04cb
509#define REG_BAR_MODE_CTRL 0x04cc 519#define REG_BAR_MODE_CTRL 0x04cc
510#define REG_RA_TRY_RATE_AGG_LMT 0x04cf 520#define REG_RA_TRY_RATE_AGG_LMT 0x04cf
521/* MACID_DROP for 8723a */
522#define REG_MACID_DROP_8732A 0x04d0
523/* EARLY_MODE_CONTROL 8188e */
524#define REG_EARLY_MODE_CONTROL_8188E 0x04d0
525/* MACID_SLEEP_2 for 8723b, 8192e, 8812a, 8821a */
526#define REG_MACID_SLEEP_2_8732B 0x04d0
527#define REG_MACID_SLEEP 0x04d4
511#define REG_NQOS_SEQ 0x04dc 528#define REG_NQOS_SEQ 0x04dc
512#define REG_QOS_SEQ 0x04de 529#define REG_QOS_SEQ 0x04de
513#define REG_NEED_CPU_HANDLE 0x04e0 530#define REG_NEED_CPU_HANDLE 0x04e0
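The rtl8xxxu_regs.h hunk shortens the queue bookkeeping names (REG_*_INFORMATION to REG_*_INFO) and documents that the same 0x0400-0x040c window carries per-AC names on 8192c/8192d and numbered Q0-Q3 names on the newer parts. The short C11 sketch below only pins down that aliasing with static assertions; the addresses are taken verbatim from the hunk.

#include <assert.h>

/* 8192c / 8192d naming (per access category) */
#define REG_VOQ_INFO	0x0400
#define REG_VIQ_INFO	0x0404
#define REG_BEQ_INFO	0x0408
#define REG_BKQ_INFO	0x040c

/* 8188e, 8723a, 8812a, 8821a, 8192e, 8723b naming (numbered queues) */
#define REG_Q0_INFO	0x400
#define REG_Q1_INFO	0x404
#define REG_Q2_INFO	0x408
#define REG_Q3_INFO	0x40c

/* Both schemes describe the same register window. */
static_assert(REG_VOQ_INFO == REG_Q0_INFO, "VO queue aliases Q0");
static_assert(REG_VIQ_INFO == REG_Q1_INFO, "VI queue aliases Q1");
static_assert(REG_BEQ_INFO == REG_Q2_INFO, "BE queue aliases Q2");
static_assert(REG_BKQ_INFO == REG_Q3_INFO, "BK queue aliases Q3");

int main(void)
{
	return 0;
}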
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index 0517a4f2d3f2..c74eb139bfa1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -131,7 +131,7 @@ static struct ieee80211_rate rtl_ratetable_5g[] = {
131}; 131};
132 132
133static const struct ieee80211_supported_band rtl_band_2ghz = { 133static const struct ieee80211_supported_band rtl_band_2ghz = {
134 .band = IEEE80211_BAND_2GHZ, 134 .band = NL80211_BAND_2GHZ,
135 135
136 .channels = rtl_channeltable_2g, 136 .channels = rtl_channeltable_2g,
137 .n_channels = ARRAY_SIZE(rtl_channeltable_2g), 137 .n_channels = ARRAY_SIZE(rtl_channeltable_2g),
@@ -143,7 +143,7 @@ static const struct ieee80211_supported_band rtl_band_2ghz = {
143}; 143};
144 144
145static struct ieee80211_supported_band rtl_band_5ghz = { 145static struct ieee80211_supported_band rtl_band_5ghz = {
146 .band = IEEE80211_BAND_5GHZ, 146 .band = NL80211_BAND_5GHZ,
147 147
148 .channels = rtl_channeltable_5g, 148 .channels = rtl_channeltable_5g,
149 .n_channels = ARRAY_SIZE(rtl_channeltable_5g), 149 .n_channels = ARRAY_SIZE(rtl_channeltable_5g),
@@ -197,7 +197,7 @@ static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw,
197 197
198 ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; 198 ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
199 199
200 /*hw->wiphy->bands[IEEE80211_BAND_2GHZ] 200 /*hw->wiphy->bands[NL80211_BAND_2GHZ]
201 *base on ant_num 201 *base on ant_num
202 *rx_mask: RX mask 202 *rx_mask: RX mask
203 *if rx_ant = 1 rx_mask[0]= 0xff;==>MCS0-MCS7 203 *if rx_ant = 1 rx_mask[0]= 0xff;==>MCS0-MCS7
@@ -328,26 +328,26 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
328 rtlhal->bandset == BAND_ON_BOTH) { 328 rtlhal->bandset == BAND_ON_BOTH) {
329 /* 1: 2.4 G bands */ 329 /* 1: 2.4 G bands */
330 /* <1> use mac->bands as mem for hw->wiphy->bands */ 330 /* <1> use mac->bands as mem for hw->wiphy->bands */
331 sband = &(rtlmac->bands[IEEE80211_BAND_2GHZ]); 331 sband = &(rtlmac->bands[NL80211_BAND_2GHZ]);
332 332
333 /* <2> set hw->wiphy->bands[IEEE80211_BAND_2GHZ] 333 /* <2> set hw->wiphy->bands[NL80211_BAND_2GHZ]
334 * to default value(1T1R) */ 334 * to default value(1T1R) */
335 memcpy(&(rtlmac->bands[IEEE80211_BAND_2GHZ]), &rtl_band_2ghz, 335 memcpy(&(rtlmac->bands[NL80211_BAND_2GHZ]), &rtl_band_2ghz,
336 sizeof(struct ieee80211_supported_band)); 336 sizeof(struct ieee80211_supported_band));
337 337
338 /* <3> init ht cap base on ant_num */ 338 /* <3> init ht cap base on ant_num */
339 _rtl_init_hw_ht_capab(hw, &sband->ht_cap); 339 _rtl_init_hw_ht_capab(hw, &sband->ht_cap);
340 340
341 /* <4> set mac->sband to wiphy->sband */ 341 /* <4> set mac->sband to wiphy->sband */
342 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband; 342 hw->wiphy->bands[NL80211_BAND_2GHZ] = sband;
343 343
344 /* 2: 5 G bands */ 344 /* 2: 5 G bands */
345 /* <1> use mac->bands as mem for hw->wiphy->bands */ 345 /* <1> use mac->bands as mem for hw->wiphy->bands */
346 sband = &(rtlmac->bands[IEEE80211_BAND_5GHZ]); 346 sband = &(rtlmac->bands[NL80211_BAND_5GHZ]);
347 347
348 /* <2> set hw->wiphy->bands[IEEE80211_BAND_5GHZ] 348 /* <2> set hw->wiphy->bands[NL80211_BAND_5GHZ]
349 * to default value(1T1R) */ 349 * to default value(1T1R) */
350 memcpy(&(rtlmac->bands[IEEE80211_BAND_5GHZ]), &rtl_band_5ghz, 350 memcpy(&(rtlmac->bands[NL80211_BAND_5GHZ]), &rtl_band_5ghz,
351 sizeof(struct ieee80211_supported_band)); 351 sizeof(struct ieee80211_supported_band));
352 352
353 /* <3> init ht cap base on ant_num */ 353 /* <3> init ht cap base on ant_num */
@@ -355,15 +355,15 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
355 355
356 _rtl_init_hw_vht_capab(hw, &sband->vht_cap); 356 _rtl_init_hw_vht_capab(hw, &sband->vht_cap);
357 /* <4> set mac->sband to wiphy->sband */ 357 /* <4> set mac->sband to wiphy->sband */
358 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband; 358 hw->wiphy->bands[NL80211_BAND_5GHZ] = sband;
359 } else { 359 } else {
360 if (rtlhal->current_bandtype == BAND_ON_2_4G) { 360 if (rtlhal->current_bandtype == BAND_ON_2_4G) {
361 /* <1> use mac->bands as mem for hw->wiphy->bands */ 361 /* <1> use mac->bands as mem for hw->wiphy->bands */
362 sband = &(rtlmac->bands[IEEE80211_BAND_2GHZ]); 362 sband = &(rtlmac->bands[NL80211_BAND_2GHZ]);
363 363
364 /* <2> set hw->wiphy->bands[IEEE80211_BAND_2GHZ] 364 /* <2> set hw->wiphy->bands[NL80211_BAND_2GHZ]
365 * to default value(1T1R) */ 365 * to default value(1T1R) */
366 memcpy(&(rtlmac->bands[IEEE80211_BAND_2GHZ]), 366 memcpy(&(rtlmac->bands[NL80211_BAND_2GHZ]),
367 &rtl_band_2ghz, 367 &rtl_band_2ghz,
368 sizeof(struct ieee80211_supported_band)); 368 sizeof(struct ieee80211_supported_band));
369 369
@@ -371,14 +371,14 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
371 _rtl_init_hw_ht_capab(hw, &sband->ht_cap); 371 _rtl_init_hw_ht_capab(hw, &sband->ht_cap);
372 372
373 /* <4> set mac->sband to wiphy->sband */ 373 /* <4> set mac->sband to wiphy->sband */
374 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband; 374 hw->wiphy->bands[NL80211_BAND_2GHZ] = sband;
375 } else if (rtlhal->current_bandtype == BAND_ON_5G) { 375 } else if (rtlhal->current_bandtype == BAND_ON_5G) {
376 /* <1> use mac->bands as mem for hw->wiphy->bands */ 376 /* <1> use mac->bands as mem for hw->wiphy->bands */
377 sband = &(rtlmac->bands[IEEE80211_BAND_5GHZ]); 377 sband = &(rtlmac->bands[NL80211_BAND_5GHZ]);
378 378
379 /* <2> set hw->wiphy->bands[IEEE80211_BAND_5GHZ] 379 /* <2> set hw->wiphy->bands[NL80211_BAND_5GHZ]
380 * to default value(1T1R) */ 380 * to default value(1T1R) */
381 memcpy(&(rtlmac->bands[IEEE80211_BAND_5GHZ]), 381 memcpy(&(rtlmac->bands[NL80211_BAND_5GHZ]),
382 &rtl_band_5ghz, 382 &rtl_band_5ghz,
383 sizeof(struct ieee80211_supported_band)); 383 sizeof(struct ieee80211_supported_band));
384 384
@@ -387,7 +387,7 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
387 387
388 _rtl_init_hw_vht_capab(hw, &sband->vht_cap); 388 _rtl_init_hw_vht_capab(hw, &sband->vht_cap);
389 /* <4> set mac->sband to wiphy->sband */ 389 /* <4> set mac->sband to wiphy->sband */
390 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband; 390 hw->wiphy->bands[NL80211_BAND_5GHZ] = sband;
391 } else { 391 } else {
392 RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Err BAND %d\n", 392 RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Err BAND %d\n",
393 rtlhal->current_bandtype); 393 rtlhal->current_bandtype);
@@ -861,7 +861,7 @@ static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw,
861 861
862/* mac80211's rate_idx is like this: 862/* mac80211's rate_idx is like this:
863 * 863 *
864 * 2.4G band:rx_status->band == IEEE80211_BAND_2GHZ 864 * 2.4G band:rx_status->band == NL80211_BAND_2GHZ
865 * 865 *
866 * B/G rate: 866 * B/G rate:
867 * (rx_status->flag & RX_FLAG_HT) = 0, 867 * (rx_status->flag & RX_FLAG_HT) = 0,
@@ -871,7 +871,7 @@ static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw,
871 * (rx_status->flag & RX_FLAG_HT) = 1, 871 * (rx_status->flag & RX_FLAG_HT) = 1,
872 * DESC_RATEMCS0-->DESC_RATEMCS15 ==> idx is 0-->15 872 * DESC_RATEMCS0-->DESC_RATEMCS15 ==> idx is 0-->15
873 * 873 *
874 * 5G band:rx_status->band == IEEE80211_BAND_5GHZ 874 * 5G band:rx_status->band == NL80211_BAND_5GHZ
875 * A rate: 875 * A rate:
876 * (rx_status->flag & RX_FLAG_HT) = 0, 876 * (rx_status->flag & RX_FLAG_HT) = 0,
877 * DESC_RATE6M-->DESC_RATE54M ==> idx is 0-->7, 877 * DESC_RATE6M-->DESC_RATE54M ==> idx is 0-->7,
@@ -958,7 +958,7 @@ int rtlwifi_rate_mapping(struct ieee80211_hw *hw, bool isht, bool isvht,
958 return rate_idx; 958 return rate_idx;
959 } 959 }
960 if (false == isht) { 960 if (false == isht) {
961 if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) { 961 if (NL80211_BAND_2GHZ == hw->conf.chandef.chan->band) {
962 switch (desc_rate) { 962 switch (desc_rate) {
963 case DESC_RATE1M: 963 case DESC_RATE1M:
964 rate_idx = 0; 964 rate_idx = 0;
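The rtlwifi/base.c hunks index the wiphy band array with enum nl80211_band values instead of the old ieee80211_band names, but the table shape is unchanged. The sketch below models only that indexing pattern; the cut-down enum, structs, terminator name, and channel counts are illustrative stand-ins, not the cfg80211/mac80211 definitions.

#include <stdio.h>
#include <stddef.h>

enum nl80211_band_model {
	NL80211_BAND_2GHZ,
	NL80211_BAND_5GHZ,
	NUM_BANDS_MODEL		/* stand-in for the real terminator */
};

struct sband_model {
	enum nl80211_band_model band;
	int n_channels;	/* example values, not hardware-derived */
};

struct wiphy_model {
	struct sband_model *bands[NUM_BANDS_MODEL];
};

int main(void)
{
	static struct sband_model band_2g = { NL80211_BAND_2GHZ, 14 };
	static struct sband_model band_5g = { NL80211_BAND_5GHZ, 25 };
	struct wiphy_model wiphy = { { NULL } };

	/* same shape as hw->wiphy->bands[NL80211_BAND_2GHZ] = sband; */
	wiphy.bands[NL80211_BAND_2GHZ] = &band_2g;
	wiphy.bands[NL80211_BAND_5GHZ] = &band_5g;

	for (size_t i = 0; i < NUM_BANDS_MODEL; i++)
		if (wiphy.bands[i])
			printf("band %zu: %d channels\n", i,
			       wiphy.bands[i]->n_channels);
	return 0;
}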
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
index 451456835f87..a30af6cc21f3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
@@ -70,83 +70,83 @@ static u8 halbtc8192e2ant_btrssi_state(u8 level_num, u8 rssi_thresh,
70 if (level_num == 2) { 70 if (level_num == 2) {
71 if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) || 71 if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
72 (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) { 72 (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
73 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 73 btc_alg_dbg(ALGO_BT_RSSI_STATE,
74 "BT Rssi pre state = LOW\n"); 74 "BT Rssi pre state = LOW\n");
75 if (btrssi >= (rssi_thresh + 75 if (btrssi >= (rssi_thresh +
76 BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) { 76 BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
77 btrssi_state = BTC_RSSI_STATE_HIGH; 77 btrssi_state = BTC_RSSI_STATE_HIGH;
78 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 78 btc_alg_dbg(ALGO_BT_RSSI_STATE,
79 "BT Rssi state switch to High\n"); 79 "BT Rssi state switch to High\n");
80 } else { 80 } else {
81 btrssi_state = BTC_RSSI_STATE_STAY_LOW; 81 btrssi_state = BTC_RSSI_STATE_STAY_LOW;
82 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 82 btc_alg_dbg(ALGO_BT_RSSI_STATE,
83 "BT Rssi state stay at Low\n"); 83 "BT Rssi state stay at Low\n");
84 } 84 }
85 } else { 85 } else {
86 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 86 btc_alg_dbg(ALGO_BT_RSSI_STATE,
87 "BT Rssi pre state = HIGH\n"); 87 "BT Rssi pre state = HIGH\n");
88 if (btrssi < rssi_thresh) { 88 if (btrssi < rssi_thresh) {
89 btrssi_state = BTC_RSSI_STATE_LOW; 89 btrssi_state = BTC_RSSI_STATE_LOW;
90 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 90 btc_alg_dbg(ALGO_BT_RSSI_STATE,
91 "BT Rssi state switch to Low\n"); 91 "BT Rssi state switch to Low\n");
92 } else { 92 } else {
93 btrssi_state = BTC_RSSI_STATE_STAY_HIGH; 93 btrssi_state = BTC_RSSI_STATE_STAY_HIGH;
94 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 94 btc_alg_dbg(ALGO_BT_RSSI_STATE,
95 "BT Rssi state stay at High\n"); 95 "BT Rssi state stay at High\n");
96 } 96 }
97 } 97 }
98 } else if (level_num == 3) { 98 } else if (level_num == 3) {
99 if (rssi_thresh > rssi_thresh1) { 99 if (rssi_thresh > rssi_thresh1) {
100 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 100 btc_alg_dbg(ALGO_BT_RSSI_STATE,
101 "BT Rssi thresh error!!\n"); 101 "BT Rssi thresh error!!\n");
102 return coex_sta->pre_bt_rssi_state; 102 return coex_sta->pre_bt_rssi_state;
103 } 103 }
104 104
105 if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) || 105 if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
106 (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) { 106 (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
107 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 107 btc_alg_dbg(ALGO_BT_RSSI_STATE,
108 "BT Rssi pre state = LOW\n"); 108 "BT Rssi pre state = LOW\n");
109 if (btrssi >= (rssi_thresh + 109 if (btrssi >= (rssi_thresh +
110 BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) { 110 BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
111 btrssi_state = BTC_RSSI_STATE_MEDIUM; 111 btrssi_state = BTC_RSSI_STATE_MEDIUM;
112 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 112 btc_alg_dbg(ALGO_BT_RSSI_STATE,
113 "BT Rssi state switch to Medium\n"); 113 "BT Rssi state switch to Medium\n");
114 } else { 114 } else {
115 btrssi_state = BTC_RSSI_STATE_STAY_LOW; 115 btrssi_state = BTC_RSSI_STATE_STAY_LOW;
116 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 116 btc_alg_dbg(ALGO_BT_RSSI_STATE,
117 "BT Rssi state stay at Low\n"); 117 "BT Rssi state stay at Low\n");
118 } 118 }
119 } else if ((coex_sta->pre_bt_rssi_state == 119 } else if ((coex_sta->pre_bt_rssi_state ==
120 BTC_RSSI_STATE_MEDIUM) || 120 BTC_RSSI_STATE_MEDIUM) ||
121 (coex_sta->pre_bt_rssi_state == 121 (coex_sta->pre_bt_rssi_state ==
122 BTC_RSSI_STATE_STAY_MEDIUM)) { 122 BTC_RSSI_STATE_STAY_MEDIUM)) {
123 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 123 btc_alg_dbg(ALGO_BT_RSSI_STATE,
124 "[BTCoex], BT Rssi pre state = MEDIUM\n"); 124 "[BTCoex], BT Rssi pre state = MEDIUM\n");
125 if (btrssi >= (rssi_thresh1 + 125 if (btrssi >= (rssi_thresh1 +
126 BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) { 126 BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
127 btrssi_state = BTC_RSSI_STATE_HIGH; 127 btrssi_state = BTC_RSSI_STATE_HIGH;
128 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 128 btc_alg_dbg(ALGO_BT_RSSI_STATE,
129 "BT Rssi state switch to High\n"); 129 "BT Rssi state switch to High\n");
130 } else if (btrssi < rssi_thresh) { 130 } else if (btrssi < rssi_thresh) {
131 btrssi_state = BTC_RSSI_STATE_LOW; 131 btrssi_state = BTC_RSSI_STATE_LOW;
132 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 132 btc_alg_dbg(ALGO_BT_RSSI_STATE,
133 "BT Rssi state switch to Low\n"); 133 "BT Rssi state switch to Low\n");
134 } else { 134 } else {
135 btrssi_state = BTC_RSSI_STATE_STAY_MEDIUM; 135 btrssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
136 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 136 btc_alg_dbg(ALGO_BT_RSSI_STATE,
137 "BT Rssi state stay at Medium\n"); 137 "BT Rssi state stay at Medium\n");
138 } 138 }
139 } else { 139 } else {
140 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 140 btc_alg_dbg(ALGO_BT_RSSI_STATE,
141 "BT Rssi pre state = HIGH\n"); 141 "BT Rssi pre state = HIGH\n");
142 if (btrssi < rssi_thresh1) { 142 if (btrssi < rssi_thresh1) {
143 btrssi_state = BTC_RSSI_STATE_MEDIUM; 143 btrssi_state = BTC_RSSI_STATE_MEDIUM;
144 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 144 btc_alg_dbg(ALGO_BT_RSSI_STATE,
145 "BT Rssi state switch to Medium\n"); 145 "BT Rssi state switch to Medium\n");
146 } else { 146 } else {
147 btrssi_state = BTC_RSSI_STATE_STAY_HIGH; 147 btrssi_state = BTC_RSSI_STATE_STAY_HIGH;
148 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 148 btc_alg_dbg(ALGO_BT_RSSI_STATE,
149 "BT Rssi state stay at High\n"); 149 "BT Rssi state stay at High\n");
150 } 150 }
151 } 151 }
152 } 152 }
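From this point the btcoexist hunks replace BTC_PRINT(BTC_MSG_ALGORITHM, flag, ...) calls with the shorter btc_alg_dbg(flag, ...). The real macro is defined in the btcoexist headers and is not part of this diff; the sketch below is only an assumed, minimal shape for such a wrapper (the message component folded into the macro name, a runtime flag check inside), with illustrative bit values and a printf stand-in for the kernel logging call.

#include <stdio.h>

#define ALGO_BT_RSSI_STATE	(1 << 0)
#define ALGO_WIFI_RSSI_STATE	(1 << 1)

/* runtime mask of enabled algorithm debug messages (illustrative) */
static unsigned int btc_alg_dbg_mask = ALGO_BT_RSSI_STATE;

#define btc_alg_dbg(flag, fmt, ...)					\
	do {								\
		if (btc_alg_dbg_mask & (flag))				\
			printf("[BTCoex alg] " fmt, ##__VA_ARGS__);	\
	} while (0)

int main(void)
{
	btc_alg_dbg(ALGO_BT_RSSI_STATE,
		    "BT Rssi state switch to %s\n", "High");
	btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
		    "suppressed: flag not enabled\n");
	return 0;
}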
@@ -173,32 +173,28 @@ static u8 halbtc8192e2ant_wifirssi_state(struct btc_coexist *btcoexist,
173 if (wifirssi >= (rssi_thresh + 173 if (wifirssi >= (rssi_thresh +
174 BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) { 174 BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
175 wifirssi_state = BTC_RSSI_STATE_HIGH; 175 wifirssi_state = BTC_RSSI_STATE_HIGH;
176 BTC_PRINT(BTC_MSG_ALGORITHM, 176 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
177 ALGO_WIFI_RSSI_STATE, 177 "wifi RSSI state switch to High\n");
178 "wifi RSSI state switch to High\n");
179 } else { 178 } else {
180 wifirssi_state = BTC_RSSI_STATE_STAY_LOW; 179 wifirssi_state = BTC_RSSI_STATE_STAY_LOW;
181 BTC_PRINT(BTC_MSG_ALGORITHM, 180 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
182 ALGO_WIFI_RSSI_STATE, 181 "wifi RSSI state stay at Low\n");
183 "wifi RSSI state stay at Low\n");
184 } 182 }
185 } else { 183 } else {
186 if (wifirssi < rssi_thresh) { 184 if (wifirssi < rssi_thresh) {
187 wifirssi_state = BTC_RSSI_STATE_LOW; 185 wifirssi_state = BTC_RSSI_STATE_LOW;
188 BTC_PRINT(BTC_MSG_ALGORITHM, 186 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
189 ALGO_WIFI_RSSI_STATE, 187 "wifi RSSI state switch to Low\n");
190 "wifi RSSI state switch to Low\n");
191 } else { 188 } else {
192 wifirssi_state = BTC_RSSI_STATE_STAY_HIGH; 189 wifirssi_state = BTC_RSSI_STATE_STAY_HIGH;
193 BTC_PRINT(BTC_MSG_ALGORITHM, 190 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
194 ALGO_WIFI_RSSI_STATE, 191 "wifi RSSI state stay at High\n");
195 "wifi RSSI state stay at High\n");
196 } 192 }
197 } 193 }
198 } else if (level_num == 3) { 194 } else if (level_num == 3) {
199 if (rssi_thresh > rssi_thresh1) { 195 if (rssi_thresh > rssi_thresh1) {
200 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, 196 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
201 "wifi RSSI thresh error!!\n"); 197 "wifi RSSI thresh error!!\n");
202 return coex_sta->pre_wifi_rssi_state[index]; 198 return coex_sta->pre_wifi_rssi_state[index];
203 } 199 }
204 200
@@ -209,14 +205,12 @@ static u8 halbtc8192e2ant_wifirssi_state(struct btc_coexist *btcoexist,
209 if (wifirssi >= (rssi_thresh + 205 if (wifirssi >= (rssi_thresh +
210 BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) { 206 BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
211 wifirssi_state = BTC_RSSI_STATE_MEDIUM; 207 wifirssi_state = BTC_RSSI_STATE_MEDIUM;
212 BTC_PRINT(BTC_MSG_ALGORITHM, 208 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
213 ALGO_WIFI_RSSI_STATE, 209 "wifi RSSI state switch to Medium\n");
214 "wifi RSSI state switch to Medium\n");
215 } else { 210 } else {
216 wifirssi_state = BTC_RSSI_STATE_STAY_LOW; 211 wifirssi_state = BTC_RSSI_STATE_STAY_LOW;
217 BTC_PRINT(BTC_MSG_ALGORITHM, 212 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
218 ALGO_WIFI_RSSI_STATE, 213 "wifi RSSI state stay at Low\n");
219 "wifi RSSI state stay at Low\n");
220 } 214 }
221 } else if ((coex_sta->pre_wifi_rssi_state[index] == 215 } else if ((coex_sta->pre_wifi_rssi_state[index] ==
222 BTC_RSSI_STATE_MEDIUM) || 216 BTC_RSSI_STATE_MEDIUM) ||
@@ -225,31 +219,26 @@ static u8 halbtc8192e2ant_wifirssi_state(struct btc_coexist *btcoexist,
225 if (wifirssi >= (rssi_thresh1 + 219 if (wifirssi >= (rssi_thresh1 +
226 BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) { 220 BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
227 wifirssi_state = BTC_RSSI_STATE_HIGH; 221 wifirssi_state = BTC_RSSI_STATE_HIGH;
228 BTC_PRINT(BTC_MSG_ALGORITHM, 222 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
229 ALGO_WIFI_RSSI_STATE, 223 "wifi RSSI state switch to High\n");
230 "wifi RSSI state switch to High\n");
231 } else if (wifirssi < rssi_thresh) { 224 } else if (wifirssi < rssi_thresh) {
232 wifirssi_state = BTC_RSSI_STATE_LOW; 225 wifirssi_state = BTC_RSSI_STATE_LOW;
233 BTC_PRINT(BTC_MSG_ALGORITHM, 226 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
234 ALGO_WIFI_RSSI_STATE, 227 "wifi RSSI state switch to Low\n");
235 "wifi RSSI state switch to Low\n");
236 } else { 228 } else {
237 wifirssi_state = BTC_RSSI_STATE_STAY_MEDIUM; 229 wifirssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
238 BTC_PRINT(BTC_MSG_ALGORITHM, 230 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
239 ALGO_WIFI_RSSI_STATE, 231 "wifi RSSI state stay at Medium\n");
240 "wifi RSSI state stay at Medium\n");
241 } 232 }
242 } else { 233 } else {
243 if (wifirssi < rssi_thresh1) { 234 if (wifirssi < rssi_thresh1) {
244 wifirssi_state = BTC_RSSI_STATE_MEDIUM; 235 wifirssi_state = BTC_RSSI_STATE_MEDIUM;
245 BTC_PRINT(BTC_MSG_ALGORITHM, 236 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
246 ALGO_WIFI_RSSI_STATE, 237 "wifi RSSI state switch to Medium\n");
247 "wifi RSSI state switch to Medium\n");
248 } else { 238 } else {
249 wifirssi_state = BTC_RSSI_STATE_STAY_HIGH; 239 wifirssi_state = BTC_RSSI_STATE_STAY_HIGH;
250 BTC_PRINT(BTC_MSG_ALGORITHM, 240 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
251 ALGO_WIFI_RSSI_STATE, 241 "wifi RSSI state stay at High\n");
252 "wifi RSSI state stay at High\n");
253 } 242 }
254 } 243 }
255 } 244 }
@@ -284,26 +273,26 @@ static void btc8192e2ant_monitor_bt_enable_dis(struct btc_coexist *btcoexist)
284 bt_disabled = false; 273 bt_disabled = false;
285 btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, 274 btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
286 &bt_disabled); 275 &bt_disabled);
287 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, 276 btc_alg_dbg(ALGO_BT_MONITOR,
288 "[BTCoex], BT is enabled !!\n"); 277 "[BTCoex], BT is enabled !!\n");
289 } else { 278 } else {
290 bt_disable_cnt++; 279 bt_disable_cnt++;
291 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, 280 btc_alg_dbg(ALGO_BT_MONITOR,
292 "[BTCoex], bt all counters = 0, %d times!!\n", 281 "[BTCoex], bt all counters = 0, %d times!!\n",
293 bt_disable_cnt); 282 bt_disable_cnt);
294 if (bt_disable_cnt >= 2) { 283 if (bt_disable_cnt >= 2) {
295 bt_disabled = true; 284 bt_disabled = true;
296 btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, 285 btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
297 &bt_disabled); 286 &bt_disabled);
298 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, 287 btc_alg_dbg(ALGO_BT_MONITOR,
299 "[BTCoex], BT is disabled !!\n"); 288 "[BTCoex], BT is disabled !!\n");
300 } 289 }
301 } 290 }
302 if (pre_bt_disabled != bt_disabled) { 291 if (pre_bt_disabled != bt_disabled) {
303 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, 292 btc_alg_dbg(ALGO_BT_MONITOR,
304 "[BTCoex], BT is from %s to %s!!\n", 293 "[BTCoex], BT is from %s to %s!!\n",
305 (pre_bt_disabled ? "disabled" : "enabled"), 294 (pre_bt_disabled ? "disabled" : "enabled"),
306 (bt_disabled ? "disabled" : "enabled")); 295 (bt_disabled ? "disabled" : "enabled"));
307 pre_bt_disabled = bt_disabled; 296 pre_bt_disabled = bt_disabled;
308 } 297 }
309} 298}
@@ -499,12 +488,12 @@ static void halbtc8192e2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
499 coex_sta->low_priority_tx = reg_lp_tx; 488 coex_sta->low_priority_tx = reg_lp_tx;
500 coex_sta->low_priority_rx = reg_lp_rx; 489 coex_sta->low_priority_rx = reg_lp_rx;
501 490
502 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, 491 btc_alg_dbg(ALGO_BT_MONITOR,
503 "[BTCoex] High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n", 492 "[BTCoex] High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
504 reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx); 493 reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
505 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, 494 btc_alg_dbg(ALGO_BT_MONITOR,
506 "[BTCoex] Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n", 495 "[BTCoex] Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
507 reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx); 496 reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
508 497
509 /* reset counter */ 498 /* reset counter */
510 btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc); 499 btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
@@ -518,9 +507,9 @@ static void halbtc8192e2ant_querybt_info(struct btc_coexist *btcoexist)
518 507
519 h2c_parameter[0] |= BIT0; /* trigger */ 508 h2c_parameter[0] |= BIT0; /* trigger */
520 509
521 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, 510 btc_alg_dbg(ALGO_TRACE_FW_EXEC,
522 "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n", 511 "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
523 h2c_parameter[0]); 512 h2c_parameter[0]);
524 513
525 btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter); 514 btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
526} 515}
@@ -592,8 +581,8 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
592 btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson); 581 btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson);
593 582
594 if (!bt_link_info->bt_link_exist) { 583 if (!bt_link_info->bt_link_exist) {
595 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 584 btc_alg_dbg(ALGO_TRACE,
596 "No BT link exists!!!\n"); 585 "No BT link exists!!!\n");
597 return algorithm; 586 return algorithm;
598 } 587 }
599 588
@@ -608,27 +597,27 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
608 597
609 if (numdiffprofile == 1) { 598 if (numdiffprofile == 1) {
610 if (bt_link_info->sco_exist) { 599 if (bt_link_info->sco_exist) {
611 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 600 btc_alg_dbg(ALGO_TRACE,
612 "SCO only\n"); 601 "SCO only\n");
613 algorithm = BT_8192E_2ANT_COEX_ALGO_SCO; 602 algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
614 } else { 603 } else {
615 if (bt_link_info->hid_exist) { 604 if (bt_link_info->hid_exist) {
616 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 605 btc_alg_dbg(ALGO_TRACE,
617 "HID only\n"); 606 "HID only\n");
618 algorithm = BT_8192E_2ANT_COEX_ALGO_HID; 607 algorithm = BT_8192E_2ANT_COEX_ALGO_HID;
619 } else if (bt_link_info->a2dp_exist) { 608 } else if (bt_link_info->a2dp_exist) {
620 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 609 btc_alg_dbg(ALGO_TRACE,
621 "A2DP only\n"); 610 "A2DP only\n");
622 algorithm = BT_8192E_2ANT_COEX_ALGO_A2DP; 611 algorithm = BT_8192E_2ANT_COEX_ALGO_A2DP;
623 } else if (bt_link_info->pan_exist) { 612 } else if (bt_link_info->pan_exist) {
624 if (bt_hson) { 613 if (bt_hson) {
625 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 614 btc_alg_dbg(ALGO_TRACE,
626 "PAN(HS) only\n"); 615 "PAN(HS) only\n");
627 algorithm = 616 algorithm =
628 BT_8192E_2ANT_COEX_ALGO_PANHS; 617 BT_8192E_2ANT_COEX_ALGO_PANHS;
629 } else { 618 } else {
630 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 619 btc_alg_dbg(ALGO_TRACE,
631 "PAN(EDR) only\n"); 620 "PAN(EDR) only\n");
632 algorithm = 621 algorithm =
633 BT_8192E_2ANT_COEX_ALGO_PANEDR; 622 BT_8192E_2ANT_COEX_ALGO_PANEDR;
634 } 623 }
@@ -637,21 +626,21 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
637 } else if (numdiffprofile == 2) { 626 } else if (numdiffprofile == 2) {
638 if (bt_link_info->sco_exist) { 627 if (bt_link_info->sco_exist) {
639 if (bt_link_info->hid_exist) { 628 if (bt_link_info->hid_exist) {
640 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 629 btc_alg_dbg(ALGO_TRACE,
641 "SCO + HID\n"); 630 "SCO + HID\n");
642 algorithm = BT_8192E_2ANT_COEX_ALGO_SCO; 631 algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
643 } else if (bt_link_info->a2dp_exist) { 632 } else if (bt_link_info->a2dp_exist) {
644 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 633 btc_alg_dbg(ALGO_TRACE,
645 "SCO + A2DP ==> SCO\n"); 634 "SCO + A2DP ==> SCO\n");
646 algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID; 635 algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
647 } else if (bt_link_info->pan_exist) { 636 } else if (bt_link_info->pan_exist) {
648 if (bt_hson) { 637 if (bt_hson) {
649 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 638 btc_alg_dbg(ALGO_TRACE,
650 "SCO + PAN(HS)\n"); 639 "SCO + PAN(HS)\n");
651 algorithm = BT_8192E_2ANT_COEX_ALGO_SCO; 640 algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
652 } else { 641 } else {
653 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 642 btc_alg_dbg(ALGO_TRACE,
654 "SCO + PAN(EDR)\n"); 643 "SCO + PAN(EDR)\n");
655 algorithm = 644 algorithm =
656 BT_8192E_2ANT_COEX_ALGO_SCO_PAN; 645 BT_8192E_2ANT_COEX_ALGO_SCO_PAN;
657 } 646 }
@@ -660,38 +649,38 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
660 if (bt_link_info->hid_exist && 649 if (bt_link_info->hid_exist &&
661 bt_link_info->a2dp_exist) { 650 bt_link_info->a2dp_exist) {
662 if (stack_info->num_of_hid >= 2) { 651 if (stack_info->num_of_hid >= 2) {
663 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 652 btc_alg_dbg(ALGO_TRACE,
664 "HID*2 + A2DP\n"); 653 "HID*2 + A2DP\n");
665 algorithm = 654 algorithm =
666 BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR; 655 BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
667 } else { 656 } else {
668 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 657 btc_alg_dbg(ALGO_TRACE,
669 "HID + A2DP\n"); 658 "HID + A2DP\n");
670 algorithm = 659 algorithm =
671 BT_8192E_2ANT_COEX_ALGO_HID_A2DP; 660 BT_8192E_2ANT_COEX_ALGO_HID_A2DP;
672 } 661 }
673 } else if (bt_link_info->hid_exist && 662 } else if (bt_link_info->hid_exist &&
674 bt_link_info->pan_exist) { 663 bt_link_info->pan_exist) {
675 if (bt_hson) { 664 if (bt_hson) {
676 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 665 btc_alg_dbg(ALGO_TRACE,
677 "HID + PAN(HS)\n"); 666 "HID + PAN(HS)\n");
678 algorithm = BT_8192E_2ANT_COEX_ALGO_HID; 667 algorithm = BT_8192E_2ANT_COEX_ALGO_HID;
679 } else { 668 } else {
680 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 669 btc_alg_dbg(ALGO_TRACE,
681 "HID + PAN(EDR)\n"); 670 "HID + PAN(EDR)\n");
682 algorithm = 671 algorithm =
683 BT_8192E_2ANT_COEX_ALGO_PANEDR_HID; 672 BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
684 } 673 }
685 } else if (bt_link_info->pan_exist && 674 } else if (bt_link_info->pan_exist &&
686 bt_link_info->a2dp_exist) { 675 bt_link_info->a2dp_exist) {
687 if (bt_hson) { 676 if (bt_hson) {
688 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 677 btc_alg_dbg(ALGO_TRACE,
689 "A2DP + PAN(HS)\n"); 678 "A2DP + PAN(HS)\n");
690 algorithm = 679 algorithm =
691 BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS; 680 BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS;
692 } else { 681 } else {
693 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 682 btc_alg_dbg(ALGO_TRACE,
694 "A2DP + PAN(EDR)\n"); 683 "A2DP + PAN(EDR)\n");
695 algorithm = 684 algorithm =
696 BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP; 685 BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP;
697 } 686 }
@@ -701,30 +690,30 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
701 if (bt_link_info->sco_exist) { 690 if (bt_link_info->sco_exist) {
702 if (bt_link_info->hid_exist && 691 if (bt_link_info->hid_exist &&
703 bt_link_info->a2dp_exist) { 692 bt_link_info->a2dp_exist) {
704 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 693 btc_alg_dbg(ALGO_TRACE,
705 "SCO + HID + A2DP ==> HID\n"); 694 "SCO + HID + A2DP ==> HID\n");
706 algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID; 695 algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
707 } else if (bt_link_info->hid_exist && 696 } else if (bt_link_info->hid_exist &&
708 bt_link_info->pan_exist) { 697 bt_link_info->pan_exist) {
709 if (bt_hson) { 698 if (bt_hson) {
710 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 699 btc_alg_dbg(ALGO_TRACE,
711 "SCO + HID + PAN(HS)\n"); 700 "SCO + HID + PAN(HS)\n");
712 algorithm = BT_8192E_2ANT_COEX_ALGO_SCO; 701 algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
713 } else { 702 } else {
714 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 703 btc_alg_dbg(ALGO_TRACE,
715 "SCO + HID + PAN(EDR)\n"); 704 "SCO + HID + PAN(EDR)\n");
716 algorithm = 705 algorithm =
717 BT_8192E_2ANT_COEX_ALGO_SCO_PAN; 706 BT_8192E_2ANT_COEX_ALGO_SCO_PAN;
718 } 707 }
719 } else if (bt_link_info->pan_exist && 708 } else if (bt_link_info->pan_exist &&
720 bt_link_info->a2dp_exist) { 709 bt_link_info->a2dp_exist) {
721 if (bt_hson) { 710 if (bt_hson) {
722 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 711 btc_alg_dbg(ALGO_TRACE,
723 "SCO + A2DP + PAN(HS)\n"); 712 "SCO + A2DP + PAN(HS)\n");
724 algorithm = BT_8192E_2ANT_COEX_ALGO_SCO; 713 algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
725 } else { 714 } else {
726 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 715 btc_alg_dbg(ALGO_TRACE,
727 "SCO + A2DP + PAN(EDR)\n"); 716 "SCO + A2DP + PAN(EDR)\n");
728 algorithm = 717 algorithm =
729 BT_8192E_2ANT_COEX_ALGO_PANEDR_HID; 718 BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
730 } 719 }
@@ -734,13 +723,13 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
734 bt_link_info->pan_exist && 723 bt_link_info->pan_exist &&
735 bt_link_info->a2dp_exist) { 724 bt_link_info->a2dp_exist) {
736 if (bt_hson) { 725 if (bt_hson) {
737 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 726 btc_alg_dbg(ALGO_TRACE,
738 "HID + A2DP + PAN(HS)\n"); 727 "HID + A2DP + PAN(HS)\n");
739 algorithm = 728 algorithm =
740 BT_8192E_2ANT_COEX_ALGO_HID_A2DP; 729 BT_8192E_2ANT_COEX_ALGO_HID_A2DP;
741 } else { 730 } else {
742 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 731 btc_alg_dbg(ALGO_TRACE,
743 "HID + A2DP + PAN(EDR)\n"); 732 "HID + A2DP + PAN(EDR)\n");
744 algorithm = 733 algorithm =
745 BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR; 734 BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
746 } 735 }
@@ -752,12 +741,12 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
752 bt_link_info->pan_exist && 741 bt_link_info->pan_exist &&
753 bt_link_info->a2dp_exist) { 742 bt_link_info->a2dp_exist) {
754 if (bt_hson) { 743 if (bt_hson) {
755 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 744 btc_alg_dbg(ALGO_TRACE,
756 "ErrorSCO+HID+A2DP+PAN(HS)\n"); 745 "ErrorSCO+HID+A2DP+PAN(HS)\n");
757 746
758 } else { 747 } else {
759 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 748 btc_alg_dbg(ALGO_TRACE,
760 "SCO+HID+A2DP+PAN(EDR)\n"); 749 "SCO+HID+A2DP+PAN(EDR)\n");
761 algorithm = 750 algorithm =
762 BT_8192E_2ANT_COEX_ALGO_PANEDR_HID; 751 BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
763 } 752 }
@@ -778,10 +767,10 @@ static void halbtc8192e2ant_setfw_dac_swinglevel(struct btc_coexist *btcoexist,
778 */ 767 */
779 h2c_parameter[0] = dac_swinglvl; 768 h2c_parameter[0] = dac_swinglvl;
780 769
781 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, 770 btc_alg_dbg(ALGO_TRACE_FW_EXEC,
782 "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swinglvl); 771 "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swinglvl);
783 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, 772 btc_alg_dbg(ALGO_TRACE_FW_EXEC,
784 "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]); 773 "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]);
785 774
786 btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter); 775 btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
787} 776}
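halbtc8192e2ant_setfw_dac_swinglevel() above packs the requested DAC swing level into a one-byte host-to-chip (H2C) payload and sends it as command 0x64 through the btc_fill_h2c callback. Here is a user-space stand-in for that fill-and-send step, with the transport stubbed out to a printf; the command id and payload layout come from the hunk, everything else is scaffolding.

    #include <stdint.h>
    #include <stdio.h>

    /* Stub for the driver's btc_fill_h2c() callback: just dump the payload. */
    static void fill_h2c_stub(uint8_t cmd_id, uint8_t len, const uint8_t *buf)
    {
        printf("H2C 0x%02x, %u byte(s):", (unsigned)cmd_id, (unsigned)len);
        for (unsigned i = 0; i < len; i++)
            printf(" 0x%02x", (unsigned)buf[i]);
        printf("\n");
    }

    static void set_fw_dac_swing_level(uint8_t dac_swing_lvl)
    {
        uint8_t h2c_parameter[1];

        h2c_parameter[0] = dac_swing_lvl;   /* byte 0 carries the level */
        fill_h2c_stub(0x64, 1, h2c_parameter);
    }

    int main(void)
    {
        set_fw_dac_swing_level(0x6);
        return 0;
    }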
@@ -793,9 +782,9 @@ static void halbtc8192e2ant_set_fwdec_btpwr(struct btc_coexist *btcoexist,
793 782
794 h2c_parameter[0] = dec_btpwr_lvl; 783 h2c_parameter[0] = dec_btpwr_lvl;
795 784
796 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, 785 btc_alg_dbg(ALGO_TRACE_FW_EXEC,
797 "[BTCoex] decrease Bt Power level = %d, FW write 0x62 = 0x%x\n", 786 "[BTCoex] decrease Bt Power level = %d, FW write 0x62 = 0x%x\n",
798 dec_btpwr_lvl, h2c_parameter[0]); 787 dec_btpwr_lvl, h2c_parameter[0]);
799 788
800 btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter); 789 btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
801} 790}
@@ -803,15 +792,15 @@ static void halbtc8192e2ant_set_fwdec_btpwr(struct btc_coexist *btcoexist,
803static void halbtc8192e2ant_dec_btpwr(struct btc_coexist *btcoexist, 792static void halbtc8192e2ant_dec_btpwr(struct btc_coexist *btcoexist,
804 bool force_exec, u8 dec_btpwr_lvl) 793 bool force_exec, u8 dec_btpwr_lvl)
805{ 794{
806 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, 795 btc_alg_dbg(ALGO_TRACE_FW,
807 "[BTCoex], %s Dec BT power level = %d\n", 796 "[BTCoex], %s Dec BT power level = %d\n",
808 (force_exec ? "force to" : ""), dec_btpwr_lvl); 797 (force_exec ? "force to" : ""), dec_btpwr_lvl);
809 coex_dm->cur_dec_bt_pwr = dec_btpwr_lvl; 798 coex_dm->cur_dec_bt_pwr = dec_btpwr_lvl;
810 799
811 if (!force_exec) { 800 if (!force_exec) {
812 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 801 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
813 "[BTCoex], preBtDecPwrLvl=%d, curBtDecPwrLvl=%d\n", 802 "[BTCoex], preBtDecPwrLvl=%d, curBtDecPwrLvl=%d\n",
814 coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr); 803 coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
815 } 804 }
816 halbtc8192e2ant_set_fwdec_btpwr(btcoexist, coex_dm->cur_dec_bt_pwr); 805 halbtc8192e2ant_set_fwdec_btpwr(btcoexist, coex_dm->cur_dec_bt_pwr);
817 806
@@ -828,10 +817,10 @@ static void halbtc8192e2ant_set_bt_autoreport(struct btc_coexist *btcoexist,
828 if (enable_autoreport) 817 if (enable_autoreport)
829 h2c_parameter[0] |= BIT0; 818 h2c_parameter[0] |= BIT0;
830 819
831 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, 820 btc_alg_dbg(ALGO_TRACE_FW_EXEC,
832 "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n", 821 "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
833 (enable_autoreport ? "Enabled!!" : "Disabled!!"), 822 (enable_autoreport ? "Enabled!!" : "Disabled!!"),
834 h2c_parameter[0]); 823 h2c_parameter[0]);
835 824
836 btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter); 825 btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
837} 826}
@@ -840,17 +829,17 @@ static void halbtc8192e2ant_bt_autoreport(struct btc_coexist *btcoexist,
840 bool force_exec, 829 bool force_exec,
841 bool enable_autoreport) 830 bool enable_autoreport)
842{ 831{
843 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, 832 btc_alg_dbg(ALGO_TRACE_FW,
844 "[BTCoex], %s BT Auto report = %s\n", 833 "[BTCoex], %s BT Auto report = %s\n",
845 (force_exec ? "force to" : ""), 834 (force_exec ? "force to" : ""),
846 ((enable_autoreport) ? "Enabled" : "Disabled")); 835 ((enable_autoreport) ? "Enabled" : "Disabled"));
847 coex_dm->cur_bt_auto_report = enable_autoreport; 836 coex_dm->cur_bt_auto_report = enable_autoreport;
848 837
849 if (!force_exec) { 838 if (!force_exec) {
850 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 839 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
851 "[BTCoex] bPreBtAutoReport=%d, bCurBtAutoReport=%d\n", 840 "[BTCoex] bPreBtAutoReport=%d, bCurBtAutoReport=%d\n",
852 coex_dm->pre_bt_auto_report, 841 coex_dm->pre_bt_auto_report,
853 coex_dm->cur_bt_auto_report); 842 coex_dm->cur_bt_auto_report);
854 843
855 if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report) 844 if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report)
856 return; 845 return;
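halbtc8192e2ant_bt_autoreport() above, like the DAC-swing, RF-shrink and coex-table wrappers further down, follows one idiom: record the requested value as the current setting, and on a NORMAL_EXEC (force_exec == false) call compare it with the cached previous setting and return early if nothing changed, so the firmware command is only re-issued when needed. A minimal, hypothetical rendering of that idiom:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical cached state, mirroring the driver's pre_ / cur_ pairs. */
    static bool pre_bt_auto_report;
    static bool cur_bt_auto_report;

    static void send_auto_report_h2c(bool enable)
    {
        printf("H2C 0x68 = 0x%02x\n", enable ? 0x01 : 0x00);
    }

    static void bt_auto_report(bool force_exec, bool enable)
    {
        cur_bt_auto_report = enable;

        /* NORMAL_EXEC path: skip the H2C if the setting did not change. */
        if (!force_exec && pre_bt_auto_report == cur_bt_auto_report)
            return;

        send_auto_report_h2c(cur_bt_auto_report);
        pre_bt_auto_report = cur_bt_auto_report;
    }

    int main(void)
    {
        bt_auto_report(false, true);    /* sent: value changed */
        bt_auto_report(false, true);    /* skipped: unchanged */
        bt_auto_report(true, true);     /* sent anyway: FORCE_EXEC */
        return 0;
    }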
@@ -864,16 +853,16 @@ static void halbtc8192e2ant_bt_autoreport(struct btc_coexist *btcoexist,
864static void halbtc8192e2ant_fw_dac_swinglvl(struct btc_coexist *btcoexist, 853static void halbtc8192e2ant_fw_dac_swinglvl(struct btc_coexist *btcoexist,
865 bool force_exec, u8 fw_dac_swinglvl) 854 bool force_exec, u8 fw_dac_swinglvl)
866{ 855{
867 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, 856 btc_alg_dbg(ALGO_TRACE_FW,
868 "[BTCoex], %s set FW Dac Swing level = %d\n", 857 "[BTCoex], %s set FW Dac Swing level = %d\n",
869 (force_exec ? "force to" : ""), fw_dac_swinglvl); 858 (force_exec ? "force to" : ""), fw_dac_swinglvl);
870 coex_dm->cur_fw_dac_swing_lvl = fw_dac_swinglvl; 859 coex_dm->cur_fw_dac_swing_lvl = fw_dac_swinglvl;
871 860
872 if (!force_exec) { 861 if (!force_exec) {
873 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 862 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
874 "[BTCoex] preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n", 863 "[BTCoex] preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n",
875 coex_dm->pre_fw_dac_swing_lvl, 864 coex_dm->pre_fw_dac_swing_lvl,
876 coex_dm->cur_fw_dac_swing_lvl); 865 coex_dm->cur_fw_dac_swing_lvl);
877 866
878 if (coex_dm->pre_fw_dac_swing_lvl == 867 if (coex_dm->pre_fw_dac_swing_lvl ==
879 coex_dm->cur_fw_dac_swing_lvl) 868 coex_dm->cur_fw_dac_swing_lvl)
@@ -891,8 +880,8 @@ static void btc8192e2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
891{ 880{
892 if (rx_rf_shrink_on) { 881 if (rx_rf_shrink_on) {
893 /* Shrink RF Rx LPF corner */ 882 /* Shrink RF Rx LPF corner */
894 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, 883 btc_alg_dbg(ALGO_TRACE_SW_EXEC,
895 "[BTCoex], Shrink RF Rx LPF corner!!\n"); 884 "[BTCoex], Shrink RF Rx LPF corner!!\n");
896 btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e, 885 btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
897 0xfffff, 0xffffc); 886 0xfffff, 0xffffc);
898 } else { 887 } else {
@@ -900,8 +889,8 @@ static void btc8192e2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
900 * After initialized, we can use coex_dm->btRf0x1eBackup 889 * After initialized, we can use coex_dm->btRf0x1eBackup
901 */ 890 */
902 if (btcoexist->initilized) { 891 if (btcoexist->initilized) {
903 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, 892 btc_alg_dbg(ALGO_TRACE_SW_EXEC,
904 "[BTCoex], Resume RF Rx LPF corner!!\n"); 893 "[BTCoex], Resume RF Rx LPF corner!!\n");
905 btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e, 894 btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
906 0xfffff, 895 0xfffff,
907 coex_dm->bt_rf0x1e_backup); 896 coex_dm->bt_rf0x1e_backup);
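btc8192e2ant_set_sw_rf_rx_lpf_corner() above shrinks the RF Rx low-pass-filter corner by writing 0xffffc into RF register 0x1e under mask 0xfffff, and restores the value saved in coex_dm->bt_rf0x1e_backup once the driver reports itself initialised. A small masked read-modify-write helper in the same spirit, with a fake register array standing in for the btc_set_rf_reg callback:

    #include <stdint.h>
    #include <stdio.h>

    /* Fake RF register file standing in for the hardware access callbacks. */
    static uint32_t rf_regs[0x40];

    static void rf_write_masked(uint8_t addr, uint32_t mask, uint32_t data)
    {
        rf_regs[addr] = (rf_regs[addr] & ~mask) | (data & mask);
    }

    int main(void)
    {
        const uint8_t reg = 0x1e;
        const uint32_t mask = 0xfffff;
        uint32_t backup;

        rf_regs[reg] = 0x12345;              /* pretend power-on value */
        backup = rf_regs[reg] & mask;        /* bt_rf0x1e_backup */

        rf_write_masked(reg, mask, 0xffffc); /* shrink the LPF corner */
        printf("shrunk : 0x%05x\n", (unsigned)(rf_regs[reg] & mask));

        rf_write_masked(reg, mask, backup);  /* resume the original corner */
        printf("resumed: 0x%05x\n", (unsigned)(rf_regs[reg] & mask));
        return 0;
    }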
@@ -912,17 +901,17 @@ static void btc8192e2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
912static void halbtc8192e2ant_rf_shrink(struct btc_coexist *btcoexist, 901static void halbtc8192e2ant_rf_shrink(struct btc_coexist *btcoexist,
913 bool force_exec, bool rx_rf_shrink_on) 902 bool force_exec, bool rx_rf_shrink_on)
914{ 903{
915 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, 904 btc_alg_dbg(ALGO_TRACE_SW,
916 "[BTCoex], %s turn Rx RF Shrink = %s\n", 905 "[BTCoex], %s turn Rx RF Shrink = %s\n",
917 (force_exec ? "force to" : ""), 906 (force_exec ? "force to" : ""),
918 ((rx_rf_shrink_on) ? "ON" : "OFF")); 907 ((rx_rf_shrink_on) ? "ON" : "OFF"));
919 coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on; 908 coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
920 909
921 if (!force_exec) { 910 if (!force_exec) {
922 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, 911 btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
923 "[BTCoex]bPreRfRxLpfShrink=%d,bCurRfRxLpfShrink=%d\n", 912 "[BTCoex]bPreRfRxLpfShrink=%d,bCurRfRxLpfShrink=%d\n",
924 coex_dm->pre_rf_rx_lpf_shrink, 913 coex_dm->pre_rf_rx_lpf_shrink,
925 coex_dm->cur_rf_rx_lpf_shrink); 914 coex_dm->cur_rf_rx_lpf_shrink);
926 915
927 if (coex_dm->pre_rf_rx_lpf_shrink == 916 if (coex_dm->pre_rf_rx_lpf_shrink ==
928 coex_dm->cur_rf_rx_lpf_shrink) 917 coex_dm->cur_rf_rx_lpf_shrink)
@@ -939,8 +928,8 @@ static void halbtc8192e2ant_set_dac_swingreg(struct btc_coexist *btcoexist,
939{ 928{
940 u8 val = (u8)level; 929 u8 val = (u8)level;
941 930
942 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, 931 btc_alg_dbg(ALGO_TRACE_SW_EXEC,
943 "[BTCoex], Write SwDacSwing = 0x%x\n", level); 932 "[BTCoex], Write SwDacSwing = 0x%x\n", level);
944 btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val); 933 btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val);
945} 934}
946 935
@@ -958,22 +947,22 @@ static void halbtc8192e2ant_DacSwing(struct btc_coexist *btcoexist,
958 bool force_exec, bool dac_swingon, 947 bool force_exec, bool dac_swingon,
959 u32 dac_swinglvl) 948 u32 dac_swinglvl)
960{ 949{
961 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, 950 btc_alg_dbg(ALGO_TRACE_SW,
962 "[BTCoex], %s turn DacSwing=%s, dac_swinglvl = 0x%x\n", 951 "[BTCoex], %s turn DacSwing=%s, dac_swinglvl = 0x%x\n",
963 (force_exec ? "force to" : ""), 952 (force_exec ? "force to" : ""),
964 ((dac_swingon) ? "ON" : "OFF"), dac_swinglvl); 953 ((dac_swingon) ? "ON" : "OFF"), dac_swinglvl);
965 coex_dm->cur_dac_swing_on = dac_swingon; 954 coex_dm->cur_dac_swing_on = dac_swingon;
966 coex_dm->cur_dac_swing_lvl = dac_swinglvl; 955 coex_dm->cur_dac_swing_lvl = dac_swinglvl;
967 956
968 if (!force_exec) { 957 if (!force_exec) {
969 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, 958 btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
970 "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl = 0x%x, ", 959 "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl = 0x%x, ",
971 coex_dm->pre_dac_swing_on, 960 coex_dm->pre_dac_swing_on,
972 coex_dm->pre_dac_swing_lvl); 961 coex_dm->pre_dac_swing_lvl);
973 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, 962 btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
974 "bCurDacSwingOn=%d, curDacSwingLvl = 0x%x\n", 963 "bCurDacSwingOn=%d, curDacSwingLvl = 0x%x\n",
975 coex_dm->cur_dac_swing_on, 964 coex_dm->cur_dac_swing_on,
976 coex_dm->cur_dac_swing_lvl); 965 coex_dm->cur_dac_swing_lvl);
977 966
978 if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) && 967 if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
979 (coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl)) 968 (coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl))
@@ -991,8 +980,8 @@ static void halbtc8192e2ant_set_agc_table(struct btc_coexist *btcoexist,
991{ 980{
992 /* BB AGC Gain Table */ 981 /* BB AGC Gain Table */
993 if (agc_table_en) { 982 if (agc_table_en) {
994 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, 983 btc_alg_dbg(ALGO_TRACE_SW_EXEC,
995 "[BTCoex], BB Agc Table On!\n"); 984 "[BTCoex], BB Agc Table On!\n");
996 btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x0a1A0001); 985 btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x0a1A0001);
997 btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x091B0001); 986 btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x091B0001);
998 btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x081C0001); 987 btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x081C0001);
@@ -1000,8 +989,8 @@ static void halbtc8192e2ant_set_agc_table(struct btc_coexist *btcoexist,
1000 btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x061E0001); 989 btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x061E0001);
1001 btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x051F0001); 990 btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x051F0001);
1002 } else { 991 } else {
1003 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, 992 btc_alg_dbg(ALGO_TRACE_SW_EXEC,
1004 "[BTCoex], BB Agc Table Off!\n"); 993 "[BTCoex], BB Agc Table Off!\n");
1005 btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xaa1A0001); 994 btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xaa1A0001);
1006 btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa91B0001); 995 btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa91B0001);
1007 btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa81C0001); 996 btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa81C0001);
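halbtc8192e2ant_set_agc_table() above switches the BB AGC gain table by streaming 32-bit words into register 0xc78; each word appears to bundle a table index with a gain value, though the field layout is not spelled out in this hunk. A loop over the "Agc Table On" constants that are visible above, with the register write stubbed out (the entry that falls between the two hunks is not reproduced):

    #include <stdint.h>
    #include <stdio.h>

    /* "Agc Table On" words copied from the hunks above. */
    static const uint32_t agc_on_tbl[] = {
        0x0a1A0001, 0x091B0001, 0x081C0001, 0x061E0001, 0x051F0001,
    };

    /* Stand-in for btcoexist->btc_write_4byte(): print instead of writing. */
    static void write_4byte_stub(uint16_t reg, uint32_t val)
    {
        printf("write 0x%03x = 0x%08x\n", (unsigned)reg, (unsigned)val);
    }

    int main(void)
    {
        for (unsigned i = 0; i < sizeof(agc_on_tbl) / sizeof(agc_on_tbl[0]); i++)
            write_4byte_stub(0xc78, agc_on_tbl[i]);
        return 0;
    }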
@@ -1014,16 +1003,17 @@ static void halbtc8192e2ant_set_agc_table(struct btc_coexist *btcoexist,
1014static void halbtc8192e2ant_AgcTable(struct btc_coexist *btcoexist, 1003static void halbtc8192e2ant_AgcTable(struct btc_coexist *btcoexist,
1015 bool force_exec, bool agc_table_en) 1004 bool force_exec, bool agc_table_en)
1016{ 1005{
1017 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, 1006 btc_alg_dbg(ALGO_TRACE_SW,
1018 "[BTCoex], %s %s Agc Table\n", 1007 "[BTCoex], %s %s Agc Table\n",
1019 (force_exec ? "force to" : ""), 1008 (force_exec ? "force to" : ""),
1020 ((agc_table_en) ? "Enable" : "Disable")); 1009 ((agc_table_en) ? "Enable" : "Disable"));
1021 coex_dm->cur_agc_table_en = agc_table_en; 1010 coex_dm->cur_agc_table_en = agc_table_en;
1022 1011
1023 if (!force_exec) { 1012 if (!force_exec) {
1024 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, 1013 btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
1025 "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n", 1014 "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
1026 coex_dm->pre_agc_table_en, coex_dm->cur_agc_table_en); 1015 coex_dm->pre_agc_table_en,
1016 coex_dm->cur_agc_table_en);
1027 1017
1028 if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en) 1018 if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en)
1029 return; 1019 return;
@@ -1037,20 +1027,20 @@ static void halbtc8192e2ant_set_coex_table(struct btc_coexist *btcoexist,
1037 u32 val0x6c0, u32 val0x6c4, 1027 u32 val0x6c0, u32 val0x6c4,
1038 u32 val0x6c8, u8 val0x6cc) 1028 u32 val0x6c8, u8 val0x6cc)
1039{ 1029{
1040 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, 1030 btc_alg_dbg(ALGO_TRACE_SW_EXEC,
1041 "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0); 1031 "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
1042 btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0); 1032 btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
1043 1033
1044 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, 1034 btc_alg_dbg(ALGO_TRACE_SW_EXEC,
1045 "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4); 1035 "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
1046 btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4); 1036 btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
1047 1037
1048 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, 1038 btc_alg_dbg(ALGO_TRACE_SW_EXEC,
1049 "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8); 1039 "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
1050 btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8); 1040 btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
1051 1041
1052 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, 1042 btc_alg_dbg(ALGO_TRACE_SW_EXEC,
1053 "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc); 1043 "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
1054 btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc); 1044 btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
1055} 1045}
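halbtc8192e2ant_set_coex_table() above programs the four coexistence-table registers: 0x6c0, 0x6c4 and 0x6c8 as 32-bit words plus a single byte at 0x6cc. Grouping them in one struct keeps the wrapper's pre/cur bookkeeping in a single place; the values used in main() below are placeholders, not the driver's tables.

    #include <stdint.h>
    #include <stdio.h>

    struct coex_table {
        uint32_t val0x6c0;
        uint32_t val0x6c4;
        uint32_t val0x6c8;
        uint8_t  val0x6cc;
    };

    static void write_4byte_stub(uint16_t reg, uint32_t val)
    {
        printf("write32 0x%03x = 0x%08x\n", (unsigned)reg, (unsigned)val);
    }

    static void write_1byte_stub(uint16_t reg, uint8_t val)
    {
        printf("write8  0x%03x = 0x%02x\n", (unsigned)reg, (unsigned)val);
    }

    static void set_coex_table(const struct coex_table *t)
    {
        write_4byte_stub(0x6c0, t->val0x6c0);
        write_4byte_stub(0x6c4, t->val0x6c4);
        write_4byte_stub(0x6c8, t->val0x6c8);
        write_1byte_stub(0x6cc, t->val0x6cc);
    }

    int main(void)
    {
        /* Placeholder values; the driver derives them per coexistence case. */
        struct coex_table t = { 0x55555555, 0x5a5a5a5a, 0x00ffffff, 0x3 };

        set_coex_table(&t);
        return 0;
    }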
1056 1046
@@ -1059,30 +1049,30 @@ static void halbtc8192e2ant_coex_table(struct btc_coexist *btcoexist,
1059 u32 val0x6c0, u32 val0x6c4, 1049 u32 val0x6c0, u32 val0x6c4,
1060 u32 val0x6c8, u8 val0x6cc) 1050 u32 val0x6c8, u8 val0x6cc)
1061{ 1051{
1062 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, 1052 btc_alg_dbg(ALGO_TRACE_SW,
1063 "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, ", 1053 "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, ",
1064 (force_exec ? "force to" : ""), val0x6c0); 1054 (force_exec ? "force to" : ""), val0x6c0);
1065 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, 1055 btc_alg_dbg(ALGO_TRACE_SW,
1066 "0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n", 1056 "0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
1067 val0x6c4, val0x6c8, val0x6cc); 1057 val0x6c4, val0x6c8, val0x6cc);
1068 coex_dm->cur_val0x6c0 = val0x6c0; 1058 coex_dm->cur_val0x6c0 = val0x6c0;
1069 coex_dm->cur_val0x6c4 = val0x6c4; 1059 coex_dm->cur_val0x6c4 = val0x6c4;
1070 coex_dm->cur_val0x6c8 = val0x6c8; 1060 coex_dm->cur_val0x6c8 = val0x6c8;
1071 coex_dm->cur_val0x6cc = val0x6cc; 1061 coex_dm->cur_val0x6cc = val0x6cc;
1072 1062
1073 if (!force_exec) { 1063 if (!force_exec) {
1074 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, 1064 btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
1075 "[BTCoex], preVal0x6c0 = 0x%x, preVal0x6c4 = 0x%x, ", 1065 "[BTCoex], preVal0x6c0 = 0x%x, preVal0x6c4 = 0x%x, ",
1076 coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4); 1066 coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4);
1077 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, 1067 btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
1078 "preVal0x6c8 = 0x%x, preVal0x6cc = 0x%x !!\n", 1068 "preVal0x6c8 = 0x%x, preVal0x6cc = 0x%x !!\n",
1079 coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc); 1069 coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
1080 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, 1070 btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
1081 "[BTCoex], curVal0x6c0 = 0x%x, curVal0x6c4 = 0x%x,\n", 1071 "[BTCoex], curVal0x6c0 = 0x%x, curVal0x6c4 = 0x%x\n",
1082 coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4); 1072 coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4);
1083 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, 1073 btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
1084 "curVal0x6c8 = 0x%x, curVal0x6cc = 0x%x !!\n", 1074 "curVal0x6c8 = 0x%x, curVal0x6cc = 0x%x !!\n",
1085 coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc); 1075 coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
1086 1076
1087 if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) && 1077 if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
1088 (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) && 1078 (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
@@ -1136,9 +1126,9 @@ static void halbtc8192e2ant_set_fw_ignore_wlanact(struct btc_coexist *btcoexist,
1136 if (enable) 1126 if (enable)
1137 h2c_parameter[0] |= BIT0; /* function enable */ 1127 h2c_parameter[0] |= BIT0; /* function enable */
1138 1128
1139 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, 1129 btc_alg_dbg(ALGO_TRACE_FW_EXEC,
1140 "[BTCoex]set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n", 1130 "[BTCoex]set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
1141 h2c_parameter[0]); 1131 h2c_parameter[0]);
1142 1132
1143 btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter); 1133 btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
1144} 1134}
@@ -1146,18 +1136,18 @@ static void halbtc8192e2ant_set_fw_ignore_wlanact(struct btc_coexist *btcoexist,
1146static void halbtc8192e2ant_IgnoreWlanAct(struct btc_coexist *btcoexist, 1136static void halbtc8192e2ant_IgnoreWlanAct(struct btc_coexist *btcoexist,
1147 bool force_exec, bool enable) 1137 bool force_exec, bool enable)
1148{ 1138{
1149 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, 1139 btc_alg_dbg(ALGO_TRACE_FW,
1150 "[BTCoex], %s turn Ignore WlanAct %s\n", 1140 "[BTCoex], %s turn Ignore WlanAct %s\n",
1151 (force_exec ? "force to" : ""), (enable ? "ON" : "OFF")); 1141 (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
1152 coex_dm->cur_ignore_wlan_act = enable; 1142 coex_dm->cur_ignore_wlan_act = enable;
1153 1143
1154 if (!force_exec) { 1144 if (!force_exec) {
1155 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 1145 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
1156 "[BTCoex], bPreIgnoreWlanAct = %d ", 1146 "[BTCoex], bPreIgnoreWlanAct = %d ",
1157 coex_dm->pre_ignore_wlan_act); 1147 coex_dm->pre_ignore_wlan_act);
1158 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 1148 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
1159 "bCurIgnoreWlanAct = %d!!\n", 1149 "bCurIgnoreWlanAct = %d!!\n",
1160 coex_dm->cur_ignore_wlan_act); 1150 coex_dm->cur_ignore_wlan_act);
1161 1151
1162 if (coex_dm->pre_ignore_wlan_act == 1152 if (coex_dm->pre_ignore_wlan_act ==
1163 coex_dm->cur_ignore_wlan_act) 1153 coex_dm->cur_ignore_wlan_act)
@@ -1185,11 +1175,11 @@ static void halbtc8192e2ant_SetFwPstdma(struct btc_coexist *btcoexist, u8 byte1,
1185 coex_dm->ps_tdma_para[3] = byte4; 1175 coex_dm->ps_tdma_para[3] = byte4;
1186 coex_dm->ps_tdma_para[4] = byte5; 1176 coex_dm->ps_tdma_para[4] = byte5;
1187 1177
1188 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, 1178 btc_alg_dbg(ALGO_TRACE_FW_EXEC,
1189 "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n", 1179 "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n",
1190 h2c_parameter[0], 1180 h2c_parameter[0],
1191 h2c_parameter[1] << 24 | h2c_parameter[2] << 16 | 1181 h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
1192 h2c_parameter[3] << 8 | h2c_parameter[4]); 1182 h2c_parameter[3] << 8 | h2c_parameter[4]);
1193 1183
1194 btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter); 1184 btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
1195} 1185}
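halbtc8192e2ant_SetFwPstdma() above ships a five-byte PS-TDMA descriptor as H2C command 0x60 and logs bytes 1-4 packed into one 32-bit word, exactly as the "FW write 0x60(5bytes)" trace shows. A sketch of that packing with a stubbed transport; the descriptor bytes passed in main() are illustrative only.

    #include <stdint.h>
    #include <stdio.h>

    static void fill_h2c_stub(uint8_t cmd, uint8_t len, const uint8_t *buf)
    {
        (void)buf;
        printf("H2C 0x%02x, %u bytes sent\n", (unsigned)cmd, (unsigned)len);
    }

    static void set_fw_ps_tdma(uint8_t b1, uint8_t b2, uint8_t b3,
                               uint8_t b4, uint8_t b5)
    {
        uint8_t h2c[5] = { b1, b2, b3, b4, b5 };
        uint32_t tail = (uint32_t)h2c[1] << 24 | (uint32_t)h2c[2] << 16 |
                        (uint32_t)h2c[3] << 8 | h2c[4];

        /* Same layout as the "FW write 0x60(5bytes)" trace in the diff. */
        printf("FW write 0x60(5bytes) = 0x%x%08x\n",
               (unsigned)h2c[0], (unsigned)tail);
        fill_h2c_stub(0x60, 5, h2c);
    }

    int main(void)
    {
        set_fw_ps_tdma(0xe3, 0x1a, 0x1a, 0xe1, 0x90);  /* illustrative bytes */
        return 0;
    }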
@@ -1213,20 +1203,20 @@ static void btc8192e2ant_sw_mec2(struct btc_coexist *btcoexist,
1213static void halbtc8192e2ant_ps_tdma(struct btc_coexist *btcoexist, 1203static void halbtc8192e2ant_ps_tdma(struct btc_coexist *btcoexist,
1214 bool force_exec, bool turn_on, u8 type) 1204 bool force_exec, bool turn_on, u8 type)
1215{ 1205{
1216 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, 1206 btc_alg_dbg(ALGO_TRACE_FW,
1217 "[BTCoex], %s turn %s PS TDMA, type=%d\n", 1207 "[BTCoex], %s turn %s PS TDMA, type=%d\n",
1218 (force_exec ? "force to" : ""), 1208 (force_exec ? "force to" : ""),
1219 (turn_on ? "ON" : "OFF"), type); 1209 (turn_on ? "ON" : "OFF"), type);
1220 coex_dm->cur_ps_tdma_on = turn_on; 1210 coex_dm->cur_ps_tdma_on = turn_on;
1221 coex_dm->cur_ps_tdma = type; 1211 coex_dm->cur_ps_tdma = type;
1222 1212
1223 if (!force_exec) { 1213 if (!force_exec) {
1224 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 1214 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
1225 "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n", 1215 "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
1226 coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on); 1216 coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
1227 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 1217 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
1228 "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n", 1218 "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
1229 coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma); 1219 coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
1230 1220
1231 if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) && 1221 if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
1232 (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma)) 1222 (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1353,8 +1343,8 @@ static void halbtc8192e2ant_set_switch_sstype(struct btc_coexist *btcoexist,
1353 u8 mimops = BTC_MIMO_PS_DYNAMIC; 1343 u8 mimops = BTC_MIMO_PS_DYNAMIC;
1354 u32 disra_mask = 0x0; 1344 u32 disra_mask = 0x0;
1355 1345
1356 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1346 btc_alg_dbg(ALGO_TRACE,
1357 "[BTCoex], REAL set SS Type = %d\n", sstype); 1347 "[BTCoex], REAL set SS Type = %d\n", sstype);
1358 1348
1359 disra_mask = halbtc8192e2ant_decidera_mask(btcoexist, sstype, 1349 disra_mask = halbtc8192e2ant_decidera_mask(btcoexist, sstype,
1360 coex_dm->curra_masktype); 1350 coex_dm->curra_masktype);
@@ -1386,9 +1376,9 @@ static void halbtc8192e2ant_set_switch_sstype(struct btc_coexist *btcoexist,
1386static void halbtc8192e2ant_switch_sstype(struct btc_coexist *btcoexist, 1376static void halbtc8192e2ant_switch_sstype(struct btc_coexist *btcoexist,
1387 bool force_exec, u8 new_sstype) 1377 bool force_exec, u8 new_sstype)
1388{ 1378{
1389 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1379 btc_alg_dbg(ALGO_TRACE,
1390 "[BTCoex], %s Switch SS Type = %d\n", 1380 "[BTCoex], %s Switch SS Type = %d\n",
1391 (force_exec ? "force to" : ""), new_sstype); 1381 (force_exec ? "force to" : ""), new_sstype);
1392 coex_dm->cur_sstype = new_sstype; 1382 coex_dm->cur_sstype = new_sstype;
1393 1383
1394 if (!force_exec) { 1384 if (!force_exec) {
@@ -1469,8 +1459,8 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
1469 btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, 1459 btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
1470 &low_pwr_disable); 1460 &low_pwr_disable);
1471 1461
1472 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1462 btc_alg_dbg(ALGO_TRACE,
1473 "[BTCoex], Wifi non-connected idle!!\n"); 1463 "[BTCoex], Wifi non-connected idle!!\n");
1474 1464
1475 if ((BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE == 1465 if ((BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE ==
1476 coex_dm->bt_status) || 1466 coex_dm->bt_status) ||
@@ -1506,8 +1496,8 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
1506 BTC_SET_ACT_DISABLE_LOW_POWER, 1496 BTC_SET_ACT_DISABLE_LOW_POWER,
1507 &low_pwr_disable); 1497 &low_pwr_disable);
1508 1498
1509 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1499 btc_alg_dbg(ALGO_TRACE,
1510 "Wifi connected + BT non connected-idle!!\n"); 1500 "Wifi connected + BT non connected-idle!!\n");
1511 1501
1512 halbtc8192e2ant_switch_sstype(btcoexist, 1502 halbtc8192e2ant_switch_sstype(btcoexist,
1513 NORMAL_EXEC, 2); 1503 NORMAL_EXEC, 2);
@@ -1534,8 +1524,8 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
1534 1524
1535 if (bt_hson) 1525 if (bt_hson)
1536 return false; 1526 return false;
1537 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1527 btc_alg_dbg(ALGO_TRACE,
1538 "Wifi connected + BT connected-idle!!\n"); 1528 "Wifi connected + BT connected-idle!!\n");
1539 1529
1540 halbtc8192e2ant_switch_sstype(btcoexist, 1530 halbtc8192e2ant_switch_sstype(btcoexist,
1541 NORMAL_EXEC, 2); 1531 NORMAL_EXEC, 2);
@@ -1560,12 +1550,12 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
1560 &low_pwr_disable); 1550 &low_pwr_disable);
1561 1551
1562 if (wifi_busy) { 1552 if (wifi_busy) {
1563 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1553 btc_alg_dbg(ALGO_TRACE,
1564 "Wifi Connected-Busy + BT Busy!!\n"); 1554 "Wifi Connected-Busy + BT Busy!!\n");
1565 common = false; 1555 common = false;
1566 } else { 1556 } else {
1567 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1557 btc_alg_dbg(ALGO_TRACE,
1568 "Wifi Connected-Idle + BT Busy!!\n"); 1558 "Wifi Connected-Idle + BT Busy!!\n");
1569 1559
1570 halbtc8192e2ant_switch_sstype(btcoexist, 1560 halbtc8192e2ant_switch_sstype(btcoexist,
1571 NORMAL_EXEC, 1); 1561 NORMAL_EXEC, 1);
@@ -1592,9 +1582,8 @@ static void btc8192e_int1(struct btc_coexist *btcoexist, bool tx_pause,
1592 int result) 1582 int result)
1593{ 1583{
1594 if (tx_pause) { 1584 if (tx_pause) {
1595 BTC_PRINT(BTC_MSG_ALGORITHM, 1585 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
1596 ALGO_TRACE_FW_DETAIL, 1586 "[BTCoex], TxPause = 1\n");
1597 "[BTCoex], TxPause = 1\n");
1598 1587
1599 if (coex_dm->cur_ps_tdma == 71) { 1588 if (coex_dm->cur_ps_tdma == 71) {
1600 halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, 1589 halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
@@ -1689,9 +1678,8 @@ static void btc8192e_int1(struct btc_coexist *btcoexist, bool tx_pause,
1689 } 1678 }
1690 } 1679 }
1691 } else { 1680 } else {
1692 BTC_PRINT(BTC_MSG_ALGORITHM, 1681 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
1693 ALGO_TRACE_FW_DETAIL, 1682 "[BTCoex], TxPause = 0\n");
1694 "[BTCoex], TxPause = 0\n");
1695 if (coex_dm->cur_ps_tdma == 5) { 1683 if (coex_dm->cur_ps_tdma == 5) {
1696 halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, 1684 halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
1697 true, 71); 1685 true, 71);
@@ -1795,9 +1783,8 @@ static void btc8192e_int2(struct btc_coexist *btcoexist, bool tx_pause,
1795 int result) 1783 int result)
1796{ 1784{
1797 if (tx_pause) { 1785 if (tx_pause) {
1798 BTC_PRINT(BTC_MSG_ALGORITHM, 1786 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
1799 ALGO_TRACE_FW_DETAIL, 1787 "[BTCoex], TxPause = 1\n");
1800 "[BTCoex], TxPause = 1\n");
1801 if (coex_dm->cur_ps_tdma == 1) { 1788 if (coex_dm->cur_ps_tdma == 1) {
1802 halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, 1789 halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
1803 true, 6); 1790 true, 6);
@@ -1886,9 +1873,8 @@ static void btc8192e_int2(struct btc_coexist *btcoexist, bool tx_pause,
1886 } 1873 }
1887 } 1874 }
1888 } else { 1875 } else {
1889 BTC_PRINT(BTC_MSG_ALGORITHM, 1876 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
1890 ALGO_TRACE_FW_DETAIL, 1877 "[BTCoex], TxPause = 0\n");
1891 "[BTCoex], TxPause = 0\n");
1892 if (coex_dm->cur_ps_tdma == 5) { 1878 if (coex_dm->cur_ps_tdma == 5) {
1893 halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, 1879 halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
1894 true, 2); 1880 true, 2);
@@ -1983,9 +1969,8 @@ static void btc8192e_int3(struct btc_coexist *btcoexist, bool tx_pause,
1983 int result) 1969 int result)
1984{ 1970{
1985 if (tx_pause) { 1971 if (tx_pause) {
1986 BTC_PRINT(BTC_MSG_ALGORITHM, 1972 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
1987 ALGO_TRACE_FW_DETAIL, 1973 "[BTCoex], TxPause = 1\n");
1988 "[BTCoex], TxPause = 1\n");
1989 if (coex_dm->cur_ps_tdma == 1) { 1974 if (coex_dm->cur_ps_tdma == 1) {
1990 halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, 1975 halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
1991 true, 7); 1976 true, 7);
@@ -2074,9 +2059,8 @@ static void btc8192e_int3(struct btc_coexist *btcoexist, bool tx_pause,
2074 } 2059 }
2075 } 2060 }
2076 } else { 2061 } else {
2077 BTC_PRINT(BTC_MSG_ALGORITHM, 2062 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
2078 ALGO_TRACE_FW_DETAIL, 2063 "[BTCoex], TxPause = 0\n");
2079 "[BTCoex], TxPause = 0\n");
2080 if (coex_dm->cur_ps_tdma == 5) { 2064 if (coex_dm->cur_ps_tdma == 5) {
2081 halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, 2065 halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
2082 true, 3); 2066 true, 3);
@@ -2178,13 +2162,13 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
2178 int result; 2162 int result;
2179 u8 retry_cnt = 0; 2163 u8 retry_cnt = 0;
2180 2164
2181 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, 2165 btc_alg_dbg(ALGO_TRACE_FW,
2182 "[BTCoex], TdmaDurationAdjust()\n"); 2166 "[BTCoex], TdmaDurationAdjust()\n");
2183 2167
2184 if (!coex_dm->auto_tdma_adjust) { 2168 if (!coex_dm->auto_tdma_adjust) {
2185 coex_dm->auto_tdma_adjust = true; 2169 coex_dm->auto_tdma_adjust = true;
2186 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 2170 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
2187 "[BTCoex], first run TdmaDurationAdjust()!!\n"); 2171 "[BTCoex], first run TdmaDurationAdjust()!!\n");
2188 if (sco_hid) { 2172 if (sco_hid) {
2189 if (tx_pause) { 2173 if (tx_pause) {
2190 if (max_interval == 1) { 2174 if (max_interval == 1) {
@@ -2288,11 +2272,11 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
2288 } else { 2272 } else {
2289 /* accquire the BT TRx retry count from BT_Info byte2 */ 2273 /* accquire the BT TRx retry count from BT_Info byte2 */
2290 retry_cnt = coex_sta->bt_retry_cnt; 2274 retry_cnt = coex_sta->bt_retry_cnt;
2291 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 2275 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
2292 "[BTCoex], retry_cnt = %d\n", retry_cnt); 2276 "[BTCoex], retry_cnt = %d\n", retry_cnt);
2293 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 2277 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
2294 "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_cnt=%d\n", 2278 "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_cnt=%d\n",
2295 up, dn, m, n, wait_cnt); 2279 up, dn, m, n, wait_cnt);
2296 result = 0; 2280 result = 0;
2297 wait_cnt++; 2281 wait_cnt++;
2298 /* no retry in the last 2-second duration */ 2282 /* no retry in the last 2-second duration */
@@ -2309,9 +2293,8 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
2309 up = 0; 2293 up = 0;
2310 dn = 0; 2294 dn = 0;
2311 result = 1; 2295 result = 1;
2312 BTC_PRINT(BTC_MSG_ALGORITHM, 2296 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
2313 ALGO_TRACE_FW_DETAIL, 2297 "[BTCoex]Increase wifi duration!!\n");
2314 "[BTCoex]Increase wifi duration!!\n");
2315 } 2298 }
2316 } else if (retry_cnt <= 3) { 2299 } else if (retry_cnt <= 3) {
2317 up--; 2300 up--;
@@ -2334,9 +2317,8 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
2334 dn = 0; 2317 dn = 0;
2335 wait_cnt = 0; 2318 wait_cnt = 0;
2336 result = -1; 2319 result = -1;
2337 BTC_PRINT(BTC_MSG_ALGORITHM, 2320 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
2338 ALGO_TRACE_FW_DETAIL, 2321 "Reduce wifi duration for retry<3\n");
2339 "Reduce wifi duration for retry<3\n");
2340 } 2322 }
2341 } else { 2323 } else {
2342 if (wait_cnt == 1) 2324 if (wait_cnt == 1)
@@ -2352,12 +2334,12 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
2352 dn = 0; 2334 dn = 0;
2353 wait_cnt = 0; 2335 wait_cnt = 0;
2354 result = -1; 2336 result = -1;
2355 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 2337 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
2356 "Decrease wifi duration for retryCounter>3!!\n"); 2338 "Decrease wifi duration for retryCounter>3!!\n");
2357 } 2339 }
2358 2340
2359 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 2341 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
2360 "[BTCoex], max Interval = %d\n", max_interval); 2342 "[BTCoex], max Interval = %d\n", max_interval);
2361 if (max_interval == 1) 2343 if (max_interval == 1)
2362 btc8192e_int1(btcoexist, tx_pause, result); 2344 btc8192e_int1(btcoexist, tx_pause, result);
2363 else if (max_interval == 2) 2345 else if (max_interval == 2)
@@ -2373,11 +2355,11 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
2373 if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) { 2355 if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) {
2374 bool scan = false, link = false, roam = false; 2356 bool scan = false, link = false, roam = false;
2375 2357
2376 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 2358 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
2377 "[BTCoex], PsTdma type dismatch!!!, "); 2359 "[BTCoex], PsTdma type dismatch!!!, ");
2378 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 2360 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
2379 "curPsTdma=%d, recordPsTdma=%d\n", 2361 "curPsTdma=%d, recordPsTdma=%d\n",
2380 coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type); 2362 coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
2381 2363
2382 btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); 2364 btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
2383 btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); 2365 btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2388,9 +2370,8 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
2388 true, 2370 true,
2389 coex_dm->tdma_adj_type); 2371 coex_dm->tdma_adj_type);
2390 else 2372 else
2391 BTC_PRINT(BTC_MSG_ALGORITHM, 2373 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
2392 ALGO_TRACE_FW_DETAIL, 2374 "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
2393 "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
2394 } 2375 }
2395} 2376}
2396 2377
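The retry-driven half of halbtc8192e2ant_tdma_duration_adjust() above widens the WiFi slot only after a sustained run of retry-free 2-second polls and shrinks it quickly once BT retries appear (immediately when retry_cnt goes above 3). The model below compresses that hysteresis; the up/dn trip points and the m/n back-off bookkeeping of the real code are simplified assumptions.

    #include <stdio.h>

    /* Compressed model of the retry-count hysteresis; trip points are
       illustrative, the real function also scales them over time. */
    static int up, dn, wait_cnt;

    static int tdma_adjust_step(int retry_cnt)
    {
        int result = 0;         /* +1 widen the wifi slot, -1 shrink it */

        wait_cnt++;
        if (retry_cnt == 0) {
            up++;
            if (dn > 0)
                dn--;
            if (up >= 6) {      /* long run without BT retries */
                up = dn = wait_cnt = 0;
                result = 1;
            }
        } else if (retry_cnt <= 3) {
            if (up > 0)
                up--;
            dn++;
            if (dn == 2) {      /* two light-retry polls in a row */
                up = dn = wait_cnt = 0;
                result = -1;
            }
        } else {                /* heavy retries: back off immediately */
            up = dn = wait_cnt = 0;
            result = -1;
        }
        return result;
    }

    int main(void)
    {
        int samples[] = { 0, 0, 0, 0, 0, 0, 2, 2, 5 };

        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
            printf("retry=%d -> result=%+d\n", samples[i],
                   tdma_adjust_step(samples[i]));
        return 0;
    }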
@@ -2594,8 +2575,8 @@ static void halbtc8192e2ant_action_a2dp(struct btc_coexist *btcoexist)
2594 btrssi_state == BTC_RSSI_STATE_STAY_LOW) && 2575 btrssi_state == BTC_RSSI_STATE_STAY_LOW) &&
2595 (wifirssi_state == BTC_RSSI_STATE_LOW || 2576 (wifirssi_state == BTC_RSSI_STATE_LOW ||
2596 wifirssi_state == BTC_RSSI_STATE_STAY_LOW)) { 2577 wifirssi_state == BTC_RSSI_STATE_STAY_LOW)) {
2597 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2578 btc_alg_dbg(ALGO_TRACE,
2598 "[BTCoex], A2dp, wifi/bt rssi both LOW!!\n"); 2579 "[BTCoex], A2dp, wifi/bt rssi both LOW!!\n");
2599 long_dist = true; 2580 long_dist = true;
2600 } 2581 }
2601 if (long_dist) { 2582 if (long_dist) {
@@ -3100,105 +3081,105 @@ static void halbtc8192e2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
3100{ 3081{
3101 u8 algorithm = 0; 3082 u8 algorithm = 0;
3102 3083
3103 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3084 btc_alg_dbg(ALGO_TRACE,
3104 "[BTCoex], RunCoexistMechanism()===>\n"); 3085 "[BTCoex], RunCoexistMechanism()===>\n");
3105 3086
3106 if (btcoexist->manual_control) { 3087 if (btcoexist->manual_control) {
3107 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3088 btc_alg_dbg(ALGO_TRACE,
3108 "[BTCoex], return for Manual CTRL <===\n"); 3089 "[BTCoex], return for Manual CTRL <===\n");
3109 return; 3090 return;
3110 } 3091 }
3111 3092
3112 if (coex_sta->under_ips) { 3093 if (coex_sta->under_ips) {
3113 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3094 btc_alg_dbg(ALGO_TRACE,
3114 "[BTCoex], wifi is under IPS !!!\n"); 3095 "[BTCoex], wifi is under IPS !!!\n");
3115 return; 3096 return;
3116 } 3097 }
3117 3098
3118 algorithm = halbtc8192e2ant_action_algorithm(btcoexist); 3099 algorithm = halbtc8192e2ant_action_algorithm(btcoexist);
3119 if (coex_sta->c2h_bt_inquiry_page && 3100 if (coex_sta->c2h_bt_inquiry_page &&
3120 (BT_8192E_2ANT_COEX_ALGO_PANHS != algorithm)) { 3101 (BT_8192E_2ANT_COEX_ALGO_PANHS != algorithm)) {
3121 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3102 btc_alg_dbg(ALGO_TRACE,
3122 "[BTCoex], BT is under inquiry/page scan !!\n"); 3103 "[BTCoex], BT is under inquiry/page scan !!\n");
3123 halbtc8192e2ant_action_bt_inquiry(btcoexist); 3104 halbtc8192e2ant_action_bt_inquiry(btcoexist);
3124 return; 3105 return;
3125 } 3106 }
3126 3107
3127 coex_dm->cur_algorithm = algorithm; 3108 coex_dm->cur_algorithm = algorithm;
3128 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3109 btc_alg_dbg(ALGO_TRACE,
3129 "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm); 3110 "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm);
3130 3111
3131 if (halbtc8192e2ant_is_common_action(btcoexist)) { 3112 if (halbtc8192e2ant_is_common_action(btcoexist)) {
3132 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3113 btc_alg_dbg(ALGO_TRACE,
3133 "[BTCoex], Action 2-Ant common.\n"); 3114 "[BTCoex], Action 2-Ant common\n");
3134 coex_dm->auto_tdma_adjust = false; 3115 coex_dm->auto_tdma_adjust = false;
3135 } else { 3116 } else {
3136 if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) { 3117 if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) {
3137 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3118 btc_alg_dbg(ALGO_TRACE,
3138 "[BTCoex] preAlgorithm=%d, curAlgorithm=%d\n", 3119 "[BTCoex] preAlgorithm=%d, curAlgorithm=%d\n",
3139 coex_dm->pre_algorithm, 3120 coex_dm->pre_algorithm,
3140 coex_dm->cur_algorithm); 3121 coex_dm->cur_algorithm);
3141 coex_dm->auto_tdma_adjust = false; 3122 coex_dm->auto_tdma_adjust = false;
3142 } 3123 }
3143 switch (coex_dm->cur_algorithm) { 3124 switch (coex_dm->cur_algorithm) {
3144 case BT_8192E_2ANT_COEX_ALGO_SCO: 3125 case BT_8192E_2ANT_COEX_ALGO_SCO:
3145 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3126 btc_alg_dbg(ALGO_TRACE,
3146 "Action 2-Ant, algorithm = SCO.\n"); 3127 "Action 2-Ant, algorithm = SCO\n");
3147 halbtc8192e2ant_action_sco(btcoexist); 3128 halbtc8192e2ant_action_sco(btcoexist);
3148 break; 3129 break;
3149 case BT_8192E_2ANT_COEX_ALGO_SCO_PAN: 3130 case BT_8192E_2ANT_COEX_ALGO_SCO_PAN:
3150 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3131 btc_alg_dbg(ALGO_TRACE,
3151 "Action 2-Ant, algorithm = SCO+PAN(EDR).\n"); 3132 "Action 2-Ant, algorithm = SCO+PAN(EDR)\n");
3152 halbtc8192e2ant_action_sco_pan(btcoexist); 3133 halbtc8192e2ant_action_sco_pan(btcoexist);
3153 break; 3134 break;
3154 case BT_8192E_2ANT_COEX_ALGO_HID: 3135 case BT_8192E_2ANT_COEX_ALGO_HID:
3155 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3136 btc_alg_dbg(ALGO_TRACE,
3156 "Action 2-Ant, algorithm = HID.\n"); 3137 "Action 2-Ant, algorithm = HID\n");
3157 halbtc8192e2ant_action_hid(btcoexist); 3138 halbtc8192e2ant_action_hid(btcoexist);
3158 break; 3139 break;
3159 case BT_8192E_2ANT_COEX_ALGO_A2DP: 3140 case BT_8192E_2ANT_COEX_ALGO_A2DP:
3160 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3141 btc_alg_dbg(ALGO_TRACE,
3161 "Action 2-Ant, algorithm = A2DP.\n"); 3142 "Action 2-Ant, algorithm = A2DP\n");
3162 halbtc8192e2ant_action_a2dp(btcoexist); 3143 halbtc8192e2ant_action_a2dp(btcoexist);
3163 break; 3144 break;
3164 case BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS: 3145 case BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS:
3165 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3146 btc_alg_dbg(ALGO_TRACE,
3166 "Action 2-Ant, algorithm = A2DP+PAN(HS).\n"); 3147 "Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
3167 halbtc8192e2ant_action_a2dp_pan_hs(btcoexist); 3148 halbtc8192e2ant_action_a2dp_pan_hs(btcoexist);
3168 break; 3149 break;
3169 case BT_8192E_2ANT_COEX_ALGO_PANEDR: 3150 case BT_8192E_2ANT_COEX_ALGO_PANEDR:
3170 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3151 btc_alg_dbg(ALGO_TRACE,
3171 "Action 2-Ant, algorithm = PAN(EDR).\n"); 3152 "Action 2-Ant, algorithm = PAN(EDR)\n");
3172 halbtc8192e2ant_action_pan_edr(btcoexist); 3153 halbtc8192e2ant_action_pan_edr(btcoexist);
3173 break; 3154 break;
3174 case BT_8192E_2ANT_COEX_ALGO_PANHS: 3155 case BT_8192E_2ANT_COEX_ALGO_PANHS:
3175 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3156 btc_alg_dbg(ALGO_TRACE,
3176 "Action 2-Ant, algorithm = HS mode.\n"); 3157 "Action 2-Ant, algorithm = HS mode\n");
3177 halbtc8192e2ant_action_pan_hs(btcoexist); 3158 halbtc8192e2ant_action_pan_hs(btcoexist);
3178 break; 3159 break;
3179 case BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP: 3160 case BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP:
3180 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3161 btc_alg_dbg(ALGO_TRACE,
3181 "Action 2-Ant, algorithm = PAN+A2DP.\n"); 3162 "Action 2-Ant, algorithm = PAN+A2DP\n");
3182 halbtc8192e2ant_action_pan_edr_a2dp(btcoexist); 3163 halbtc8192e2ant_action_pan_edr_a2dp(btcoexist);
3183 break; 3164 break;
3184 case BT_8192E_2ANT_COEX_ALGO_PANEDR_HID: 3165 case BT_8192E_2ANT_COEX_ALGO_PANEDR_HID:
3185 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3166 btc_alg_dbg(ALGO_TRACE,
3186 "Action 2-Ant, algorithm = PAN(EDR)+HID.\n"); 3167 "Action 2-Ant, algorithm = PAN(EDR)+HID\n");
3187 halbtc8192e2ant_action_pan_edr_hid(btcoexist); 3168 halbtc8192e2ant_action_pan_edr_hid(btcoexist);
3188 break; 3169 break;
3189 case BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR: 3170 case BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
3190 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3171 btc_alg_dbg(ALGO_TRACE,
3191 "Action 2-Ant, algorithm = HID+A2DP+PAN.\n"); 3172 "Action 2-Ant, algorithm = HID+A2DP+PAN\n");
3192 btc8192e2ant_action_hid_a2dp_pan_edr(btcoexist); 3173 btc8192e2ant_action_hid_a2dp_pan_edr(btcoexist);
3193 break; 3174 break;
3194 case BT_8192E_2ANT_COEX_ALGO_HID_A2DP: 3175 case BT_8192E_2ANT_COEX_ALGO_HID_A2DP:
3195 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3176 btc_alg_dbg(ALGO_TRACE,
3196 "Action 2-Ant, algorithm = HID+A2DP.\n"); 3177 "Action 2-Ant, algorithm = HID+A2DP\n");
3197 halbtc8192e2ant_action_hid_a2dp(btcoexist); 3178 halbtc8192e2ant_action_hid_a2dp(btcoexist);
3198 break; 3179 break;
3199 default: 3180 default:
3200 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3181 btc_alg_dbg(ALGO_TRACE,
3201 "Action 2-Ant, algorithm = unknown!!\n"); 3182 "Action 2-Ant, algorithm = unknown!!\n");
3202 /* halbtc8192e2ant_coex_alloff(btcoexist); */ 3183 /* halbtc8192e2ant_coex_alloff(btcoexist); */
3203 break; 3184 break;
3204 } 3185 }
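The switch above dispatches coex_dm->cur_algorithm to the matching per-profile action routine. A table-driven dispatch is a common alternative when such a switch keeps growing; the sketch below is purely hypothetical (handler names and enum values are invented, and the driver itself keeps the switch).

    #include <stdio.h>

    enum coex_algo { ALGO_SCO, ALGO_HID, ALGO_A2DP, ALGO_MAX };

    static void action_sco(void)  { printf("run SCO handler\n"); }
    static void action_hid(void)  { printf("run HID handler\n"); }
    static void action_a2dp(void) { printf("run A2DP handler\n"); }

    /* Dispatch table indexed by algorithm id. */
    static void (*const coex_actions[ALGO_MAX])(void) = {
        [ALGO_SCO]  = action_sco,
        [ALGO_HID]  = action_hid,
        [ALGO_A2DP] = action_a2dp,
    };

    static void run_coex_mechanism(enum coex_algo algo)
    {
        if ((unsigned)algo < ALGO_MAX && coex_actions[algo])
            coex_actions[algo]();
        else
            printf("algorithm = unknown!!\n");
    }

    int main(void)
    {
        run_coex_mechanism(ALGO_A2DP);
        return 0;
    }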
@@ -3212,8 +3193,8 @@ static void halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist,
3212 u16 u16tmp = 0; 3193 u16 u16tmp = 0;
3213 u8 u8tmp = 0; 3194 u8 u8tmp = 0;
3214 3195
3215 BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, 3196 btc_iface_dbg(INTF_INIT,
3216 "[BTCoex], 2Ant Init HW Config!!\n"); 3197 "[BTCoex], 2Ant Init HW Config!!\n");
3217 3198
3218 if (backup) { 3199 if (backup) {
3219 /* backup rf 0x1e value */ 3200 /* backup rf 0x1e value */
@@ -3296,8 +3277,8 @@ void ex_halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist)
3296 3277
3297void ex_halbtc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist) 3278void ex_halbtc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist)
3298{ 3279{
3299 BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, 3280 btc_iface_dbg(INTF_INIT,
3300 "[BTCoex], Coex Mechanism Init!!\n"); 3281 "[BTCoex], Coex Mechanism Init!!\n");
3301 halbtc8192e2ant_init_coex_dm(btcoexist); 3282 halbtc8192e2ant_init_coex_dm(btcoexist);
3302} 3283}
3303 3284
@@ -3525,13 +3506,13 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
3525void ex_halbtc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type) 3506void ex_halbtc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
3526{ 3507{
3527 if (BTC_IPS_ENTER == type) { 3508 if (BTC_IPS_ENTER == type) {
3528 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3509 btc_iface_dbg(INTF_NOTIFY,
3529 "[BTCoex], IPS ENTER notify\n"); 3510 "[BTCoex], IPS ENTER notify\n");
3530 coex_sta->under_ips = true; 3511 coex_sta->under_ips = true;
3531 halbtc8192e2ant_coex_alloff(btcoexist); 3512 halbtc8192e2ant_coex_alloff(btcoexist);
3532 } else if (BTC_IPS_LEAVE == type) { 3513 } else if (BTC_IPS_LEAVE == type) {
3533 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3514 btc_iface_dbg(INTF_NOTIFY,
3534 "[BTCoex], IPS LEAVE notify\n"); 3515 "[BTCoex], IPS LEAVE notify\n");
3535 coex_sta->under_ips = false; 3516 coex_sta->under_ips = false;
3536 } 3517 }
3537} 3518}
@@ -3539,12 +3520,12 @@ void ex_halbtc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
3539void ex_halbtc8192e2ant_lps_notify(struct btc_coexist *btcoexist, u8 type) 3520void ex_halbtc8192e2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
3540{ 3521{
3541 if (BTC_LPS_ENABLE == type) { 3522 if (BTC_LPS_ENABLE == type) {
3542 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3523 btc_iface_dbg(INTF_NOTIFY,
3543 "[BTCoex], LPS ENABLE notify\n"); 3524 "[BTCoex], LPS ENABLE notify\n");
3544 coex_sta->under_lps = true; 3525 coex_sta->under_lps = true;
3545 } else if (BTC_LPS_DISABLE == type) { 3526 } else if (BTC_LPS_DISABLE == type) {
3546 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3527 btc_iface_dbg(INTF_NOTIFY,
3547 "[BTCoex], LPS DISABLE notify\n"); 3528 "[BTCoex], LPS DISABLE notify\n");
3548 coex_sta->under_lps = false; 3529 coex_sta->under_lps = false;
3549 } 3530 }
3550} 3531}
@@ -3552,21 +3533,21 @@ void ex_halbtc8192e2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
3552void ex_halbtc8192e2ant_scan_notify(struct btc_coexist *btcoexist, u8 type) 3533void ex_halbtc8192e2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
3553{ 3534{
3554 if (BTC_SCAN_START == type) 3535 if (BTC_SCAN_START == type)
3555 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3536 btc_iface_dbg(INTF_NOTIFY,
3556 "[BTCoex], SCAN START notify\n"); 3537 "[BTCoex], SCAN START notify\n");
3557 else if (BTC_SCAN_FINISH == type) 3538 else if (BTC_SCAN_FINISH == type)
3558 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3539 btc_iface_dbg(INTF_NOTIFY,
3559 "[BTCoex], SCAN FINISH notify\n"); 3540 "[BTCoex], SCAN FINISH notify\n");
3560} 3541}
3561 3542
3562void ex_halbtc8192e2ant_connect_notify(struct btc_coexist *btcoexist, u8 type) 3543void ex_halbtc8192e2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
3563{ 3544{
3564 if (BTC_ASSOCIATE_START == type) 3545 if (BTC_ASSOCIATE_START == type)
3565 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3546 btc_iface_dbg(INTF_NOTIFY,
3566 "[BTCoex], CONNECT START notify\n"); 3547 "[BTCoex], CONNECT START notify\n");
3567 else if (BTC_ASSOCIATE_FINISH == type) 3548 else if (BTC_ASSOCIATE_FINISH == type)
3568 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3549 btc_iface_dbg(INTF_NOTIFY,
3569 "[BTCoex], CONNECT FINISH notify\n"); 3550 "[BTCoex], CONNECT FINISH notify\n");
3570} 3551}
3571 3552
3572void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist, 3553void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist,
@@ -3582,11 +3563,11 @@ void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist,
3582 return; 3563 return;
3583 3564
3584 if (BTC_MEDIA_CONNECT == type) 3565 if (BTC_MEDIA_CONNECT == type)
3585 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3566 btc_iface_dbg(INTF_NOTIFY,
3586 "[BTCoex], MEDIA connect notify\n"); 3567 "[BTCoex], MEDIA connect notify\n");
3587 else 3568 else
3588 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3569 btc_iface_dbg(INTF_NOTIFY,
3589 "[BTCoex], MEDIA disconnect notify\n"); 3570 "[BTCoex], MEDIA disconnect notify\n");
3590 3571
3591 /* only 2.4G we need to inform bt the chnl mask */ 3572 /* only 2.4G we need to inform bt the chnl mask */
3592 btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL, 3573 btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL,
@@ -3606,10 +3587,10 @@ void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist,
3606 coex_dm->wifi_chnl_info[1] = h2c_parameter[1]; 3587 coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
3607 coex_dm->wifi_chnl_info[2] = h2c_parameter[2]; 3588 coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
3608 3589
3609 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, 3590 btc_alg_dbg(ALGO_TRACE_FW_EXEC,
3610 "[BTCoex], FW write 0x66 = 0x%x\n", 3591 "[BTCoex], FW write 0x66 = 0x%x\n",
3611 h2c_parameter[0] << 16 | h2c_parameter[1] << 8 | 3592 h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
3612 h2c_parameter[2]); 3593 h2c_parameter[2]);
3613 3594
3614 btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter); 3595 btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
3615} 3596}
@@ -3618,8 +3599,8 @@ void ex_halbtc8192e2ant_special_packet_notify(struct btc_coexist *btcoexist,
3618 u8 type) 3599 u8 type)
3619{ 3600{
3620 if (type == BTC_PACKET_DHCP) 3601 if (type == BTC_PACKET_DHCP)
3621 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3602 btc_iface_dbg(INTF_NOTIFY,
3622 "[BTCoex], DHCP Packet notify\n"); 3603 "[BTCoex], DHCP Packet notify\n");
3623} 3604}
3624 3605
3625void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist, 3606void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
@@ -3637,19 +3618,19 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
3637 rsp_source = BT_INFO_SRC_8192E_2ANT_WIFI_FW; 3618 rsp_source = BT_INFO_SRC_8192E_2ANT_WIFI_FW;
3638 coex_sta->bt_info_c2h_cnt[rsp_source]++; 3619 coex_sta->bt_info_c2h_cnt[rsp_source]++;
3639 3620
3640 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3621 btc_iface_dbg(INTF_NOTIFY,
3641 "[BTCoex], Bt info[%d], length=%d, hex data = [", 3622 "[BTCoex], Bt info[%d], length=%d, hex data = [",
3642 rsp_source, length); 3623 rsp_source, length);
3643 for (i = 0; i < length; i++) { 3624 for (i = 0; i < length; i++) {
3644 coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i]; 3625 coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
3645 if (i == 1) 3626 if (i == 1)
3646 bt_info = tmp_buf[i]; 3627 bt_info = tmp_buf[i];
3647 if (i == length-1) 3628 if (i == length-1)
3648 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3629 btc_iface_dbg(INTF_NOTIFY,
3649 "0x%02x]\n", tmp_buf[i]); 3630 "0x%02x]\n", tmp_buf[i]);
3650 else 3631 else
3651 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3632 btc_iface_dbg(INTF_NOTIFY,
3652 "0x%02x, ", tmp_buf[i]); 3633 "0x%02x, ", tmp_buf[i]);
3653 } 3634 }
3654 3635
3655 if (BT_INFO_SRC_8192E_2ANT_WIFI_FW != rsp_source) { 3636 if (BT_INFO_SRC_8192E_2ANT_WIFI_FW != rsp_source) {
@@ -3666,8 +3647,8 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
3666 * because bt is reset and loss of the info. 3647 * because bt is reset and loss of the info.
3667 */ 3648 */
3668 if ((coex_sta->bt_info_ext & BIT1)) { 3649 if ((coex_sta->bt_info_ext & BIT1)) {
3669 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3650 btc_alg_dbg(ALGO_TRACE,
3670 "bit1, send wifi BW&Chnl to BT!!\n"); 3651 "bit1, send wifi BW&Chnl to BT!!\n");
3671 btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, 3652 btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
3672 &wifi_connected); 3653 &wifi_connected);
3673 if (wifi_connected) 3654 if (wifi_connected)
@@ -3683,8 +3664,8 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
3683 if ((coex_sta->bt_info_ext & BIT3)) { 3664 if ((coex_sta->bt_info_ext & BIT3)) {
3684 if (!btcoexist->manual_control && 3665 if (!btcoexist->manual_control &&
3685 !btcoexist->stop_coex_dm) { 3666 !btcoexist->stop_coex_dm) {
3686 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3667 btc_alg_dbg(ALGO_TRACE,
3687 "bit3, BT NOT ignore Wlan active!\n"); 3668 "bit3, BT NOT ignore Wlan active!\n");
3688 halbtc8192e2ant_IgnoreWlanAct(btcoexist, 3669 halbtc8192e2ant_IgnoreWlanAct(btcoexist,
3689 FORCE_EXEC, 3670 FORCE_EXEC,
3690 false); 3671 false);
@@ -3742,25 +3723,25 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
3742 3723
3743 if (!(bt_info&BT_INFO_8192E_2ANT_B_CONNECTION)) { 3724 if (!(bt_info&BT_INFO_8192E_2ANT_B_CONNECTION)) {
3744 coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE; 3725 coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE;
3745 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3726 btc_alg_dbg(ALGO_TRACE,
3746 "[BTCoex], BT Non-Connected idle!!!\n"); 3727 "[BTCoex], BT Non-Connected idle!!!\n");
3747 } else if (bt_info == BT_INFO_8192E_2ANT_B_CONNECTION) { 3728 } else if (bt_info == BT_INFO_8192E_2ANT_B_CONNECTION) {
3748 coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE; 3729 coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE;
3749 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3730 btc_alg_dbg(ALGO_TRACE,
3750 "[BTCoex], bt_infoNotify(), BT Connected-idle!!!\n"); 3731 "[BTCoex], bt_infoNotify(), BT Connected-idle!!!\n");
3751 } else if ((bt_info&BT_INFO_8192E_2ANT_B_SCO_ESCO) || 3732 } else if ((bt_info&BT_INFO_8192E_2ANT_B_SCO_ESCO) ||
3752 (bt_info&BT_INFO_8192E_2ANT_B_SCO_BUSY)) { 3733 (bt_info&BT_INFO_8192E_2ANT_B_SCO_BUSY)) {
3753 coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_SCO_BUSY; 3734 coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_SCO_BUSY;
3754 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3735 btc_alg_dbg(ALGO_TRACE,
3755 "[BTCoex], bt_infoNotify(), BT SCO busy!!!\n"); 3736 "[BTCoex], bt_infoNotify(), BT SCO busy!!!\n");
3756 } else if (bt_info&BT_INFO_8192E_2ANT_B_ACL_BUSY) { 3737 } else if (bt_info&BT_INFO_8192E_2ANT_B_ACL_BUSY) {
3757 coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_ACL_BUSY; 3738 coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_ACL_BUSY;
3758 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3739 btc_alg_dbg(ALGO_TRACE,
3759 "[BTCoex], bt_infoNotify(), BT ACL busy!!!\n"); 3740 "[BTCoex], bt_infoNotify(), BT ACL busy!!!\n");
3760 } else { 3741 } else {
3761 coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_MAX; 3742 coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_MAX;
3762 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3743 btc_alg_dbg(ALGO_TRACE,
3763 "[BTCoex]bt_infoNotify(), BT Non-Defined state!!!\n"); 3744 "[BTCoex]bt_infoNotify(), BT Non-Defined state!!!\n");
3764 } 3745 }
3765 3746
3766 if ((BT_8192E_2ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) || 3747 if ((BT_8192E_2ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
@@ -3788,7 +3769,7 @@ void ex_halbtc8192e2ant_stack_operation_notify(struct btc_coexist *btcoexist,
3788 3769
3789void ex_halbtc8192e2ant_halt_notify(struct btc_coexist *btcoexist) 3770void ex_halbtc8192e2ant_halt_notify(struct btc_coexist *btcoexist)
3790{ 3771{
3791 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Halt notify\n"); 3772 btc_iface_dbg(INTF_NOTIFY, "[BTCoex], Halt notify\n");
3792 3773
3793 halbtc8192e2ant_IgnoreWlanAct(btcoexist, FORCE_EXEC, true); 3774 halbtc8192e2ant_IgnoreWlanAct(btcoexist, FORCE_EXEC, true);
3794 ex_halbtc8192e2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT); 3775 ex_halbtc8192e2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
@@ -3801,29 +3782,29 @@ void ex_halbtc8192e2ant_periodical(struct btc_coexist *btcoexist)
3801 struct btc_board_info *board_info = &btcoexist->board_info; 3782 struct btc_board_info *board_info = &btcoexist->board_info;
3802 struct btc_stack_info *stack_info = &btcoexist->stack_info; 3783 struct btc_stack_info *stack_info = &btcoexist->stack_info;
3803 3784
3804 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3785 btc_alg_dbg(ALGO_TRACE,
3805 "=======================Periodical=======================\n"); 3786 "=======================Periodical=======================\n");
3806 if (dis_ver_info_cnt <= 5) { 3787 if (dis_ver_info_cnt <= 5) {
3807 dis_ver_info_cnt += 1; 3788 dis_ver_info_cnt += 1;
3808 BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, 3789 btc_iface_dbg(INTF_INIT,
3809 "************************************************\n"); 3790 "************************************************\n");
3810 BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, 3791 btc_iface_dbg(INTF_INIT,
3811 "Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n", 3792 "Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
3812 board_info->pg_ant_num, board_info->btdm_ant_num, 3793 board_info->pg_ant_num, board_info->btdm_ant_num,
3813 board_info->btdm_ant_pos); 3794 board_info->btdm_ant_pos);
3814 BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, 3795 btc_iface_dbg(INTF_INIT,
3815 "BT stack/ hci ext ver = %s / %d\n", 3796 "BT stack/ hci ext ver = %s / %d\n",
3816 ((stack_info->profile_notified) ? "Yes" : "No"), 3797 ((stack_info->profile_notified) ? "Yes" : "No"),
3817 stack_info->hci_version); 3798 stack_info->hci_version);
3818 btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, 3799 btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
3819 &bt_patch_ver); 3800 &bt_patch_ver);
3820 btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); 3801 btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
3821 BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, 3802 btc_iface_dbg(INTF_INIT,
3822 "CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n", 3803 "CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
3823 glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant, 3804 glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant,
3824 fw_ver, bt_patch_ver, bt_patch_ver); 3805 fw_ver, bt_patch_ver, bt_patch_ver);
3825 BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, 3806 btc_iface_dbg(INTF_INIT,
3826 "************************************************\n"); 3807 "************************************************\n");
3827 } 3808 }
3828 3809
3829#if (BT_AUTO_REPORT_ONLY_8192E_2ANT == 0) 3810#if (BT_AUTO_REPORT_ONLY_8192E_2ANT == 0)
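All of the hunks above, and those in the halbtc8723b1ant.c diff that follows, apply the same mechanical substitution: BTC_PRINT(BTC_MSG_ALGORITHM, <flag>, ...) becomes btc_alg_dbg(<flag>, ...) and BTC_PRINT(BTC_MSG_INTERFACE, <flag>, ...) becomes btc_iface_dbg(<flag>, ...), dropping the now-redundant message-class argument and letting the long format strings reflow onto fewer continuation lines. The C sketch below is only a minimal, user-space illustration of that wrapper pattern; it assumes hypothetical *_sketch macro names, printf output and made-up flag values, and is not the btc_alg_dbg()/btc_iface_dbg() definitions used by the rtlwifi btcoexist headers.

/*
 * Illustrative sketch only -- not the driver's real macros.
 * Shows how per-category wrappers can absorb the leading
 * BTC_MSG_ALGORITHM / BTC_MSG_INTERFACE argument that every old
 * BTC_PRINT() call site had to pass explicitly.
 */
#include <stdio.h>

/* hypothetical per-category enable masks for this sketch */
static unsigned int dbg_alg_mask   = ~0u;	/* ALGO_*  messages */
static unsigned int dbg_iface_mask = ~0u;	/* INTF_*  messages */

/* old style: message class passed on every call */
#define BTC_PRINT_SKETCH(mask, flag, fmt, ...)			\
	do {							\
		if ((mask) & (flag))				\
			printf(fmt, ##__VA_ARGS__);		\
	} while (0)

/* new style: the message class is baked into the wrapper name */
#define btc_alg_dbg_sketch(flag, fmt, ...)			\
	BTC_PRINT_SKETCH(dbg_alg_mask, flag, fmt, ##__VA_ARGS__)
#define btc_iface_dbg_sketch(flag, fmt, ...)			\
	BTC_PRINT_SKETCH(dbg_iface_mask, flag, fmt, ##__VA_ARGS__)

#define ALGO_TRACE	0x1	/* hypothetical flag values */
#define INTF_NOTIFY	0x2

int main(void)
{
	/* one argument fewer per call, same output as before */
	btc_alg_dbg_sketch(ALGO_TRACE,
			   "[BTCoex], BT Non-Connected idle!!!\n");
	btc_iface_dbg_sketch(INTF_NOTIFY,
			     "[BTCoex], MEDIA connect notify\n");
	return 0;
}

Baking the message class into the wrapper name keeps every call site one argument shorter, which is what lets many of the three-line BTC_PRINT() statements in these hunks collapse to two lines on the new side of the diff.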
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
index 7e239d3cea26..16add42a62af 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
@@ -74,28 +74,28 @@ static u8 halbtc8723b1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
74 if (bt_rssi >= rssi_thresh + 74 if (bt_rssi >= rssi_thresh +
75 BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) { 75 BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
76 bt_rssi_state = BTC_RSSI_STATE_HIGH; 76 bt_rssi_state = BTC_RSSI_STATE_HIGH;
77 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 77 btc_alg_dbg(ALGO_BT_RSSI_STATE,
78 "[BTCoex], BT Rssi state switch to High\n"); 78 "[BTCoex], BT Rssi state switch to High\n");
79 } else { 79 } else {
80 bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; 80 bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
81 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 81 btc_alg_dbg(ALGO_BT_RSSI_STATE,
82 "[BTCoex], BT Rssi state stay at Low\n"); 82 "[BTCoex], BT Rssi state stay at Low\n");
83 } 83 }
84 } else { 84 } else {
85 if (bt_rssi < rssi_thresh) { 85 if (bt_rssi < rssi_thresh) {
86 bt_rssi_state = BTC_RSSI_STATE_LOW; 86 bt_rssi_state = BTC_RSSI_STATE_LOW;
87 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 87 btc_alg_dbg(ALGO_BT_RSSI_STATE,
88 "[BTCoex], BT Rssi state switch to Low\n"); 88 "[BTCoex], BT Rssi state switch to Low\n");
89 } else { 89 } else {
90 bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH; 90 bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
91 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 91 btc_alg_dbg(ALGO_BT_RSSI_STATE,
92 "[BTCoex], BT Rssi state stay at High\n"); 92 "[BTCoex], BT Rssi state stay at High\n");
93 } 93 }
94 } 94 }
95 } else if (level_num == 3) { 95 } else if (level_num == 3) {
96 if (rssi_thresh > rssi_thresh1) { 96 if (rssi_thresh > rssi_thresh1) {
97 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 97 btc_alg_dbg(ALGO_BT_RSSI_STATE,
98 "[BTCoex], BT Rssi thresh error!!\n"); 98 "[BTCoex], BT Rssi thresh error!!\n");
99 return coex_sta->pre_bt_rssi_state; 99 return coex_sta->pre_bt_rssi_state;
100 } 100 }
101 101
@@ -104,12 +104,12 @@ static u8 halbtc8723b1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
104 if (bt_rssi >= rssi_thresh + 104 if (bt_rssi >= rssi_thresh +
105 BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) { 105 BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
106 bt_rssi_state = BTC_RSSI_STATE_MEDIUM; 106 bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
107 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 107 btc_alg_dbg(ALGO_BT_RSSI_STATE,
108 "[BTCoex], BT Rssi state switch to Medium\n"); 108 "[BTCoex], BT Rssi state switch to Medium\n");
109 } else { 109 } else {
110 bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; 110 bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
111 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 111 btc_alg_dbg(ALGO_BT_RSSI_STATE,
112 "[BTCoex], BT Rssi state stay at Low\n"); 112 "[BTCoex], BT Rssi state stay at Low\n");
113 } 113 }
114 } else if ((coex_sta->pre_bt_rssi_state == 114 } else if ((coex_sta->pre_bt_rssi_state ==
115 BTC_RSSI_STATE_MEDIUM) || 115 BTC_RSSI_STATE_MEDIUM) ||
@@ -118,26 +118,26 @@ static u8 halbtc8723b1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
118 if (bt_rssi >= rssi_thresh1 + 118 if (bt_rssi >= rssi_thresh1 +
119 BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) { 119 BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
120 bt_rssi_state = BTC_RSSI_STATE_HIGH; 120 bt_rssi_state = BTC_RSSI_STATE_HIGH;
121 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 121 btc_alg_dbg(ALGO_BT_RSSI_STATE,
122 "[BTCoex], BT Rssi state switch to High\n"); 122 "[BTCoex], BT Rssi state switch to High\n");
123 } else if (bt_rssi < rssi_thresh) { 123 } else if (bt_rssi < rssi_thresh) {
124 bt_rssi_state = BTC_RSSI_STATE_LOW; 124 bt_rssi_state = BTC_RSSI_STATE_LOW;
125 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 125 btc_alg_dbg(ALGO_BT_RSSI_STATE,
126 "[BTCoex], BT Rssi state switch to Low\n"); 126 "[BTCoex], BT Rssi state switch to Low\n");
127 } else { 127 } else {
128 bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM; 128 bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
129 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 129 btc_alg_dbg(ALGO_BT_RSSI_STATE,
130 "[BTCoex], BT Rssi state stay at Medium\n"); 130 "[BTCoex], BT Rssi state stay at Medium\n");
131 } 131 }
132 } else { 132 } else {
133 if (bt_rssi < rssi_thresh1) { 133 if (bt_rssi < rssi_thresh1) {
134 bt_rssi_state = BTC_RSSI_STATE_MEDIUM; 134 bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
135 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 135 btc_alg_dbg(ALGO_BT_RSSI_STATE,
136 "[BTCoex], BT Rssi state switch to Medium\n"); 136 "[BTCoex], BT Rssi state switch to Medium\n");
137 } else { 137 } else {
138 bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH; 138 bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
139 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 139 btc_alg_dbg(ALGO_BT_RSSI_STATE,
140 "[BTCoex], BT Rssi state stay at High\n"); 140 "[BTCoex], BT Rssi state stay at High\n");
141 } 141 }
142 } 142 }
143 } 143 }
@@ -165,32 +165,28 @@ static u8 halbtc8723b1ant_wifi_rssi_state(struct btc_coexist *btcoexist,
165 if (wifi_rssi >= rssi_thresh + 165 if (wifi_rssi >= rssi_thresh +
166 BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) { 166 BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
167 wifi_rssi_state = BTC_RSSI_STATE_HIGH; 167 wifi_rssi_state = BTC_RSSI_STATE_HIGH;
168 BTC_PRINT(BTC_MSG_ALGORITHM, 168 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
169 ALGO_WIFI_RSSI_STATE, 169 "[BTCoex], wifi RSSI state switch to High\n");
170 "[BTCoex], wifi RSSI state switch to High\n");
171 } else { 170 } else {
172 wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW; 171 wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
173 BTC_PRINT(BTC_MSG_ALGORITHM, 172 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
174 ALGO_WIFI_RSSI_STATE, 173 "[BTCoex], wifi RSSI state stay at Low\n");
175 "[BTCoex], wifi RSSI state stay at Low\n");
176 } 174 }
177 } else { 175 } else {
178 if (wifi_rssi < rssi_thresh) { 176 if (wifi_rssi < rssi_thresh) {
179 wifi_rssi_state = BTC_RSSI_STATE_LOW; 177 wifi_rssi_state = BTC_RSSI_STATE_LOW;
180 BTC_PRINT(BTC_MSG_ALGORITHM, 178 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
181 ALGO_WIFI_RSSI_STATE, 179 "[BTCoex], wifi RSSI state switch to Low\n");
182 "[BTCoex], wifi RSSI state switch to Low\n");
183 } else { 180 } else {
184 wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH; 181 wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
185 BTC_PRINT(BTC_MSG_ALGORITHM, 182 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
186 ALGO_WIFI_RSSI_STATE, 183 "[BTCoex], wifi RSSI state stay at High\n");
187 "[BTCoex], wifi RSSI state stay at High\n");
188 } 184 }
189 } 185 }
190 } else if (level_num == 3) { 186 } else if (level_num == 3) {
191 if (rssi_thresh > rssi_thresh1) { 187 if (rssi_thresh > rssi_thresh1) {
192 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, 188 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
193 "[BTCoex], wifi RSSI thresh error!!\n"); 189 "[BTCoex], wifi RSSI thresh error!!\n");
194 return coex_sta->pre_wifi_rssi_state[index]; 190 return coex_sta->pre_wifi_rssi_state[index];
195 } 191 }
196 192
@@ -201,14 +197,12 @@ static u8 halbtc8723b1ant_wifi_rssi_state(struct btc_coexist *btcoexist,
201 if (wifi_rssi >= rssi_thresh + 197 if (wifi_rssi >= rssi_thresh +
202 BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) { 198 BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
203 wifi_rssi_state = BTC_RSSI_STATE_MEDIUM; 199 wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
204 BTC_PRINT(BTC_MSG_ALGORITHM, 200 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
205 ALGO_WIFI_RSSI_STATE, 201 "[BTCoex], wifi RSSI state switch to Medium\n");
206 "[BTCoex], wifi RSSI state switch to Medium\n");
207 } else { 202 } else {
208 wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW; 203 wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
209 BTC_PRINT(BTC_MSG_ALGORITHM, 204 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
210 ALGO_WIFI_RSSI_STATE, 205 "[BTCoex], wifi RSSI state stay at Low\n");
211 "[BTCoex], wifi RSSI state stay at Low\n");
212 } 206 }
213 } else if ((coex_sta->pre_wifi_rssi_state[index] == 207 } else if ((coex_sta->pre_wifi_rssi_state[index] ==
214 BTC_RSSI_STATE_MEDIUM) || 208 BTC_RSSI_STATE_MEDIUM) ||
@@ -217,31 +211,26 @@ static u8 halbtc8723b1ant_wifi_rssi_state(struct btc_coexist *btcoexist,
217 if (wifi_rssi >= rssi_thresh1 + 211 if (wifi_rssi >= rssi_thresh1 +
218 BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) { 212 BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
219 wifi_rssi_state = BTC_RSSI_STATE_HIGH; 213 wifi_rssi_state = BTC_RSSI_STATE_HIGH;
220 BTC_PRINT(BTC_MSG_ALGORITHM, 214 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
221 ALGO_WIFI_RSSI_STATE, 215 "[BTCoex], wifi RSSI state switch to High\n");
222 "[BTCoex], wifi RSSI state switch to High\n");
223 } else if (wifi_rssi < rssi_thresh) { 216 } else if (wifi_rssi < rssi_thresh) {
224 wifi_rssi_state = BTC_RSSI_STATE_LOW; 217 wifi_rssi_state = BTC_RSSI_STATE_LOW;
225 BTC_PRINT(BTC_MSG_ALGORITHM, 218 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
226 ALGO_WIFI_RSSI_STATE, 219 "[BTCoex], wifi RSSI state switch to Low\n");
227 "[BTCoex], wifi RSSI state switch to Low\n");
228 } else { 220 } else {
229 wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM; 221 wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
230 BTC_PRINT(BTC_MSG_ALGORITHM, 222 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
231 ALGO_WIFI_RSSI_STATE, 223 "[BTCoex], wifi RSSI state stay at Medium\n");
232 "[BTCoex], wifi RSSI state stay at Medium\n");
233 } 224 }
234 } else { 225 } else {
235 if (wifi_rssi < rssi_thresh1) { 226 if (wifi_rssi < rssi_thresh1) {
236 wifi_rssi_state = BTC_RSSI_STATE_MEDIUM; 227 wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
237 BTC_PRINT(BTC_MSG_ALGORITHM, 228 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
238 ALGO_WIFI_RSSI_STATE, 229 "[BTCoex], wifi RSSI state switch to Medium\n");
239 "[BTCoex], wifi RSSI state switch to Medium\n");
240 } else { 230 } else {
241 wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH; 231 wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
242 BTC_PRINT(BTC_MSG_ALGORITHM, 232 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
243 ALGO_WIFI_RSSI_STATE, 233 "[BTCoex], wifi RSSI state stay at High\n");
244 "[BTCoex], wifi RSSI state stay at High\n");
245 } 234 }
246 } 235 }
247 } 236 }
@@ -435,9 +424,9 @@ static void halbtc8723b1ant_query_bt_info(struct btc_coexist *btcoexist)
435 424
436 h2c_parameter[0] |= BIT0; /* trigger*/ 425 h2c_parameter[0] |= BIT0; /* trigger*/
437 426
438 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, 427 btc_alg_dbg(ALGO_TRACE_FW_EXEC,
439 "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n", 428 "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
440 h2c_parameter[0]); 429 h2c_parameter[0]);
441 430
442 btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter); 431 btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
443} 432}
@@ -532,8 +521,8 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
532 btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); 521 btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
533 522
534 if (!bt_link_info->bt_link_exist) { 523 if (!bt_link_info->bt_link_exist) {
535 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 524 btc_alg_dbg(ALGO_TRACE,
536 "[BTCoex], No BT link exists!!!\n"); 525 "[BTCoex], No BT link exists!!!\n");
537 return algorithm; 526 return algorithm;
538 } 527 }
539 528
@@ -548,27 +537,27 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
548 537
549 if (numdiffprofile == 1) { 538 if (numdiffprofile == 1) {
550 if (bt_link_info->sco_exist) { 539 if (bt_link_info->sco_exist) {
551 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 540 btc_alg_dbg(ALGO_TRACE,
552 "[BTCoex], BT Profile = SCO only\n"); 541 "[BTCoex], BT Profile = SCO only\n");
553 algorithm = BT_8723B_1ANT_COEX_ALGO_SCO; 542 algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
554 } else { 543 } else {
555 if (bt_link_info->hid_exist) { 544 if (bt_link_info->hid_exist) {
556 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 545 btc_alg_dbg(ALGO_TRACE,
557 "[BTCoex], BT Profile = HID only\n"); 546 "[BTCoex], BT Profile = HID only\n");
558 algorithm = BT_8723B_1ANT_COEX_ALGO_HID; 547 algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
559 } else if (bt_link_info->a2dp_exist) { 548 } else if (bt_link_info->a2dp_exist) {
560 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 549 btc_alg_dbg(ALGO_TRACE,
561 "[BTCoex], BT Profile = A2DP only\n"); 550 "[BTCoex], BT Profile = A2DP only\n");
562 algorithm = BT_8723B_1ANT_COEX_ALGO_A2DP; 551 algorithm = BT_8723B_1ANT_COEX_ALGO_A2DP;
563 } else if (bt_link_info->pan_exist) { 552 } else if (bt_link_info->pan_exist) {
564 if (bt_hs_on) { 553 if (bt_hs_on) {
565 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 554 btc_alg_dbg(ALGO_TRACE,
566 "[BTCoex], BT Profile = PAN(HS) only\n"); 555 "[BTCoex], BT Profile = PAN(HS) only\n");
567 algorithm = 556 algorithm =
568 BT_8723B_1ANT_COEX_ALGO_PANHS; 557 BT_8723B_1ANT_COEX_ALGO_PANHS;
569 } else { 558 } else {
570 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 559 btc_alg_dbg(ALGO_TRACE,
571 "[BTCoex], BT Profile = PAN(EDR) only\n"); 560 "[BTCoex], BT Profile = PAN(EDR) only\n");
572 algorithm = 561 algorithm =
573 BT_8723B_1ANT_COEX_ALGO_PANEDR; 562 BT_8723B_1ANT_COEX_ALGO_PANEDR;
574 } 563 }
@@ -577,21 +566,21 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
577 } else if (numdiffprofile == 2) { 566 } else if (numdiffprofile == 2) {
578 if (bt_link_info->sco_exist) { 567 if (bt_link_info->sco_exist) {
579 if (bt_link_info->hid_exist) { 568 if (bt_link_info->hid_exist) {
580 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 569 btc_alg_dbg(ALGO_TRACE,
581 "[BTCoex], BT Profile = SCO + HID\n"); 570 "[BTCoex], BT Profile = SCO + HID\n");
582 algorithm = BT_8723B_1ANT_COEX_ALGO_HID; 571 algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
583 } else if (bt_link_info->a2dp_exist) { 572 } else if (bt_link_info->a2dp_exist) {
584 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 573 btc_alg_dbg(ALGO_TRACE,
585 "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n"); 574 "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n");
586 algorithm = BT_8723B_1ANT_COEX_ALGO_SCO; 575 algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
587 } else if (bt_link_info->pan_exist) { 576 } else if (bt_link_info->pan_exist) {
588 if (bt_hs_on) { 577 if (bt_hs_on) {
589 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 578 btc_alg_dbg(ALGO_TRACE,
590 "[BTCoex], BT Profile = SCO + PAN(HS)\n"); 579 "[BTCoex], BT Profile = SCO + PAN(HS)\n");
591 algorithm = BT_8723B_1ANT_COEX_ALGO_SCO; 580 algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
592 } else { 581 } else {
593 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 582 btc_alg_dbg(ALGO_TRACE,
594 "[BTCoex], BT Profile = SCO + PAN(EDR)\n"); 583 "[BTCoex], BT Profile = SCO + PAN(EDR)\n");
595 algorithm = 584 algorithm =
596 BT_8723B_1ANT_COEX_ALGO_PANEDR_HID; 585 BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
597 } 586 }
@@ -599,32 +588,32 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
599 } else { 588 } else {
600 if (bt_link_info->hid_exist && 589 if (bt_link_info->hid_exist &&
601 bt_link_info->a2dp_exist) { 590 bt_link_info->a2dp_exist) {
602 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 591 btc_alg_dbg(ALGO_TRACE,
603 "[BTCoex], BT Profile = HID + A2DP\n"); 592 "[BTCoex], BT Profile = HID + A2DP\n");
604 algorithm = BT_8723B_1ANT_COEX_ALGO_HID_A2DP; 593 algorithm = BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
605 } else if (bt_link_info->hid_exist && 594 } else if (bt_link_info->hid_exist &&
606 bt_link_info->pan_exist) { 595 bt_link_info->pan_exist) {
607 if (bt_hs_on) { 596 if (bt_hs_on) {
608 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 597 btc_alg_dbg(ALGO_TRACE,
609 "[BTCoex], BT Profile = HID + PAN(HS)\n"); 598 "[BTCoex], BT Profile = HID + PAN(HS)\n");
610 algorithm = 599 algorithm =
611 BT_8723B_1ANT_COEX_ALGO_HID_A2DP; 600 BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
612 } else { 601 } else {
613 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 602 btc_alg_dbg(ALGO_TRACE,
614 "[BTCoex], BT Profile = HID + PAN(EDR)\n"); 603 "[BTCoex], BT Profile = HID + PAN(EDR)\n");
615 algorithm = 604 algorithm =
616 BT_8723B_1ANT_COEX_ALGO_PANEDR_HID; 605 BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
617 } 606 }
618 } else if (bt_link_info->pan_exist && 607 } else if (bt_link_info->pan_exist &&
619 bt_link_info->a2dp_exist) { 608 bt_link_info->a2dp_exist) {
620 if (bt_hs_on) { 609 if (bt_hs_on) {
621 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 610 btc_alg_dbg(ALGO_TRACE,
622 "[BTCoex], BT Profile = A2DP + PAN(HS)\n"); 611 "[BTCoex], BT Profile = A2DP + PAN(HS)\n");
623 algorithm = 612 algorithm =
624 BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS; 613 BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS;
625 } else { 614 } else {
626 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 615 btc_alg_dbg(ALGO_TRACE,
627 "[BTCoex], BT Profile = A2DP + PAN(EDR)\n"); 616 "[BTCoex], BT Profile = A2DP + PAN(EDR)\n");
628 algorithm = 617 algorithm =
629 BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP; 618 BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP;
630 } 619 }
@@ -634,31 +623,31 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
634 if (bt_link_info->sco_exist) { 623 if (bt_link_info->sco_exist) {
635 if (bt_link_info->hid_exist && 624 if (bt_link_info->hid_exist &&
636 bt_link_info->a2dp_exist) { 625 bt_link_info->a2dp_exist) {
637 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 626 btc_alg_dbg(ALGO_TRACE,
638 "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n"); 627 "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n");
639 algorithm = BT_8723B_1ANT_COEX_ALGO_HID; 628 algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
640 } else if (bt_link_info->hid_exist && 629 } else if (bt_link_info->hid_exist &&
641 bt_link_info->pan_exist) { 630 bt_link_info->pan_exist) {
642 if (bt_hs_on) { 631 if (bt_hs_on) {
643 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 632 btc_alg_dbg(ALGO_TRACE,
644 "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n"); 633 "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n");
645 algorithm = 634 algorithm =
646 BT_8723B_1ANT_COEX_ALGO_HID_A2DP; 635 BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
647 } else { 636 } else {
648 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 637 btc_alg_dbg(ALGO_TRACE,
649 "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n"); 638 "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n");
650 algorithm = 639 algorithm =
651 BT_8723B_1ANT_COEX_ALGO_PANEDR_HID; 640 BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
652 } 641 }
653 } else if (bt_link_info->pan_exist && 642 } else if (bt_link_info->pan_exist &&
654 bt_link_info->a2dp_exist) { 643 bt_link_info->a2dp_exist) {
655 if (bt_hs_on) { 644 if (bt_hs_on) {
656 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 645 btc_alg_dbg(ALGO_TRACE,
657 "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n"); 646 "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n");
658 algorithm = BT_8723B_1ANT_COEX_ALGO_SCO; 647 algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
659 } else { 648 } else {
660 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 649 btc_alg_dbg(ALGO_TRACE,
661 "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n"); 650 "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n");
662 algorithm = 651 algorithm =
663 BT_8723B_1ANT_COEX_ALGO_PANEDR_HID; 652 BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
664 } 653 }
@@ -668,13 +657,13 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
668 bt_link_info->pan_exist && 657 bt_link_info->pan_exist &&
669 bt_link_info->a2dp_exist) { 658 bt_link_info->a2dp_exist) {
670 if (bt_hs_on) { 659 if (bt_hs_on) {
671 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 660 btc_alg_dbg(ALGO_TRACE,
672 "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n"); 661 "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n");
673 algorithm = 662 algorithm =
674 BT_8723B_1ANT_COEX_ALGO_HID_A2DP; 663 BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
675 } else { 664 } else {
676 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 665 btc_alg_dbg(ALGO_TRACE,
677 "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n"); 666 "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n");
678 algorithm = 667 algorithm =
679 BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR; 668 BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR;
680 } 669 }
@@ -686,11 +675,11 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
686 bt_link_info->pan_exist && 675 bt_link_info->pan_exist &&
687 bt_link_info->a2dp_exist) { 676 bt_link_info->a2dp_exist) {
688 if (bt_hs_on) { 677 if (bt_hs_on) {
689 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 678 btc_alg_dbg(ALGO_TRACE,
690 "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n"); 679 "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n");
691 } else { 680 } else {
692 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 681 btc_alg_dbg(ALGO_TRACE,
693 "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n"); 682 "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
694 algorithm = 683 algorithm =
695 BT_8723B_1ANT_COEX_ALGO_PANEDR_HID; 684 BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
696 } 685 }
@@ -717,9 +706,9 @@ static void btc8723b1ant_set_sw_pen_tx_rate_adapt(struct btc_coexist *btcoexist,
717 h2c_parameter[5] = 0xf9; /*MCS5 or OFDM36 */ 706 h2c_parameter[5] = 0xf9; /*MCS5 or OFDM36 */
718 } 707 }
719 708
720 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, 709 btc_alg_dbg(ALGO_TRACE_FW_EXEC,
721 "[BTCoex], set WiFi Low-Penalty Retry: %s", 710 "[BTCoex], set WiFi Low-Penalty Retry: %s",
722 (low_penalty_ra ? "ON!!" : "OFF!!")); 711 (low_penalty_ra ? "ON!!" : "OFF!!"));
723 712
724 btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter); 713 btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
725} 714}
@@ -743,20 +732,20 @@ static void halbtc8723b1ant_set_coex_table(struct btc_coexist *btcoexist,
743 u32 val0x6c0, u32 val0x6c4, 732 u32 val0x6c0, u32 val0x6c4,
744 u32 val0x6c8, u8 val0x6cc) 733 u32 val0x6c8, u8 val0x6cc)
745{ 734{
746 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, 735 btc_alg_dbg(ALGO_TRACE_SW_EXEC,
747 "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0); 736 "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
748 btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0); 737 btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
749 738
750 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, 739 btc_alg_dbg(ALGO_TRACE_SW_EXEC,
751 "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4); 740 "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
752 btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4); 741 btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
753 742
754 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, 743 btc_alg_dbg(ALGO_TRACE_SW_EXEC,
755 "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8); 744 "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
756 btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8); 745 btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
757 746
758 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, 747 btc_alg_dbg(ALGO_TRACE_SW_EXEC,
759 "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc); 748 "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
760 btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc); 749 btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
761} 750}
762 751
@@ -765,10 +754,10 @@ static void halbtc8723b1ant_coex_table(struct btc_coexist *btcoexist,
765 u32 val0x6c4, u32 val0x6c8, 754 u32 val0x6c4, u32 val0x6c8,
766 u8 val0x6cc) 755 u8 val0x6cc)
767{ 756{
768 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, 757 btc_alg_dbg(ALGO_TRACE_SW,
769 "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6cc = 0x%x\n", 758 "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6cc = 0x%x\n",
770 (force_exec ? "force to" : ""), 759 (force_exec ? "force to" : ""),
771 val0x6c0, val0x6c4, val0x6cc); 760 val0x6c0, val0x6c4, val0x6cc);
772 coex_dm->cur_val0x6c0 = val0x6c0; 761 coex_dm->cur_val0x6c0 = val0x6c0;
773 coex_dm->cur_val0x6c4 = val0x6c4; 762 coex_dm->cur_val0x6c4 = val0x6c4;
774 coex_dm->cur_val0x6c8 = val0x6c8; 763 coex_dm->cur_val0x6c8 = val0x6c8;
@@ -839,9 +828,9 @@ static void halbtc8723b1ant_SetFwIgnoreWlanAct(struct btc_coexist *btcoexist,
839 if (enable) 828 if (enable)
840 h2c_parameter[0] |= BIT0; /* function enable */ 829 h2c_parameter[0] |= BIT0; /* function enable */
841 830
842 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, 831 btc_alg_dbg(ALGO_TRACE_FW_EXEC,
843 "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n", 832 "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
844 h2c_parameter[0]); 833 h2c_parameter[0]);
845 834
846 btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter); 835 btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
847} 836}
@@ -849,16 +838,16 @@ static void halbtc8723b1ant_SetFwIgnoreWlanAct(struct btc_coexist *btcoexist,
849static void halbtc8723b1ant_ignore_wlan_act(struct btc_coexist *btcoexist, 838static void halbtc8723b1ant_ignore_wlan_act(struct btc_coexist *btcoexist,
850 bool force_exec, bool enable) 839 bool force_exec, bool enable)
851{ 840{
852 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, 841 btc_alg_dbg(ALGO_TRACE_FW,
853 "[BTCoex], %s turn Ignore WlanAct %s\n", 842 "[BTCoex], %s turn Ignore WlanAct %s\n",
854 (force_exec ? "force to" : ""), (enable ? "ON" : "OFF")); 843 (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
855 coex_dm->cur_ignore_wlan_act = enable; 844 coex_dm->cur_ignore_wlan_act = enable;
856 845
857 if (!force_exec) { 846 if (!force_exec) {
858 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 847 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
859 "[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n", 848 "[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n",
860 coex_dm->pre_ignore_wlan_act, 849 coex_dm->pre_ignore_wlan_act,
861 coex_dm->cur_ignore_wlan_act); 850 coex_dm->cur_ignore_wlan_act);
862 851
863 if (coex_dm->pre_ignore_wlan_act == 852 if (coex_dm->pre_ignore_wlan_act ==
864 coex_dm->cur_ignore_wlan_act) 853 coex_dm->cur_ignore_wlan_act)
@@ -882,8 +871,8 @@ static void halbtc8723b1ant_set_fw_ps_tdma(struct btc_coexist *btcoexist,
882 871
883 if (ap_enable) { 872 if (ap_enable) {
884 if ((byte1 & BIT4) && !(byte1 & BIT5)) { 873 if ((byte1 & BIT4) && !(byte1 & BIT5)) {
885 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 874 btc_iface_dbg(INTF_NOTIFY,
886 "[BTCoex], FW for 1Ant AP mode\n"); 875 "[BTCoex], FW for 1Ant AP mode\n");
887 real_byte1 &= ~BIT4; 876 real_byte1 &= ~BIT4;
888 real_byte1 |= BIT5; 877 real_byte1 |= BIT5;
889 878
@@ -904,13 +893,13 @@ static void halbtc8723b1ant_set_fw_ps_tdma(struct btc_coexist *btcoexist,
904 coex_dm->ps_tdma_para[3] = byte4; 893 coex_dm->ps_tdma_para[3] = byte4;
905 coex_dm->ps_tdma_para[4] = real_byte5; 894 coex_dm->ps_tdma_para[4] = real_byte5;
906 895
907 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, 896 btc_alg_dbg(ALGO_TRACE_FW_EXEC,
908 "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n", 897 "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n",
909 h2c_parameter[0], 898 h2c_parameter[0],
910 h2c_parameter[1] << 24 | 899 h2c_parameter[1] << 24 |
911 h2c_parameter[2] << 16 | 900 h2c_parameter[2] << 16 |
912 h2c_parameter[3] << 8 | 901 h2c_parameter[3] << 8 |
913 h2c_parameter[4]); 902 h2c_parameter[4]);
914 903
915 btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter); 904 btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
916} 905}
@@ -929,22 +918,22 @@ static void halbtc8723b1ant_LpsRpwm(struct btc_coexist *btcoexist,
929 bool force_exec, 918 bool force_exec,
930 u8 lps_val, u8 rpwm_val) 919 u8 lps_val, u8 rpwm_val)
931{ 920{
932 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, 921 btc_alg_dbg(ALGO_TRACE_FW,
933 "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n", 922 "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n",
934 (force_exec ? "force to" : ""), lps_val, rpwm_val); 923 (force_exec ? "force to" : ""), lps_val, rpwm_val);
935 coex_dm->cur_lps = lps_val; 924 coex_dm->cur_lps = lps_val;
936 coex_dm->cur_rpwm = rpwm_val; 925 coex_dm->cur_rpwm = rpwm_val;
937 926
938 if (!force_exec) { 927 if (!force_exec) {
939 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 928 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
940 "[BTCoex], LPS-RxBeaconMode = 0x%x , LPS-RPWM = 0x%x!!\n", 929 "[BTCoex], LPS-RxBeaconMode = 0x%x , LPS-RPWM = 0x%x!!\n",
941 coex_dm->cur_lps, coex_dm->cur_rpwm); 930 coex_dm->cur_lps, coex_dm->cur_rpwm);
942 931
943 if ((coex_dm->pre_lps == coex_dm->cur_lps) && 932 if ((coex_dm->pre_lps == coex_dm->cur_lps) &&
944 (coex_dm->pre_rpwm == coex_dm->cur_rpwm)) { 933 (coex_dm->pre_rpwm == coex_dm->cur_rpwm)) {
945 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 934 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
946 "[BTCoex], LPS-RPWM_Last = 0x%x , LPS-RPWM_Now = 0x%x!!\n", 935 "[BTCoex], LPS-RPWM_Last = 0x%x , LPS-RPWM_Now = 0x%x!!\n",
947 coex_dm->pre_rpwm, coex_dm->cur_rpwm); 936 coex_dm->pre_rpwm, coex_dm->cur_rpwm);
948 937
949 return; 938 return;
950 } 939 }
@@ -958,8 +947,8 @@ static void halbtc8723b1ant_LpsRpwm(struct btc_coexist *btcoexist,
958static void halbtc8723b1ant_sw_mechanism(struct btc_coexist *btcoexist, 947static void halbtc8723b1ant_sw_mechanism(struct btc_coexist *btcoexist,
959 bool low_penalty_ra) 948 bool low_penalty_ra)
960{ 949{
961 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, 950 btc_alg_dbg(ALGO_BT_MONITOR,
962 "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra); 951 "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra);
963 952
964 halbtc8723b1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra); 953 halbtc8723b1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra);
965} 954}
@@ -1174,13 +1163,13 @@ static void halbtc8723b1ant_ps_tdma(struct btc_coexist *btcoexist,
1174 1163
1175 if (!force_exec) { 1164 if (!force_exec) {
1176 if (coex_dm->cur_ps_tdma_on) 1165 if (coex_dm->cur_ps_tdma_on)
1177 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 1166 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
1178 "[BTCoex], ******** TDMA(on, %d) *********\n", 1167 "[BTCoex], ******** TDMA(on, %d) *********\n",
1179 coex_dm->cur_ps_tdma); 1168 coex_dm->cur_ps_tdma);
1180 else 1169 else
1181 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 1170 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
1182 "[BTCoex], ******** TDMA(off, %d) ********\n", 1171 "[BTCoex], ******** TDMA(off, %d) ********\n",
1183 coex_dm->cur_ps_tdma); 1172 coex_dm->cur_ps_tdma);
1184 1173
1185 if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) && 1174 if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
1186 (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma)) 1175 (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1394,45 +1383,45 @@ static bool halbtc8723b1ant_is_common_action(struct btc_coexist *btcoexist)
1394 1383
1395 if (!wifi_connected && 1384 if (!wifi_connected &&
1396 BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) { 1385 BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) {
1397 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1386 btc_alg_dbg(ALGO_TRACE,
1398 "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n"); 1387 "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n");
1399 halbtc8723b1ant_sw_mechanism(btcoexist, false); 1388 halbtc8723b1ant_sw_mechanism(btcoexist, false);
1400 commom = true; 1389 commom = true;
1401 } else if (wifi_connected && 1390 } else if (wifi_connected &&
1402 (BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE == 1391 (BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
1403 coex_dm->bt_status)) { 1392 coex_dm->bt_status)) {
1404 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1393 btc_alg_dbg(ALGO_TRACE,
1405 "[BTCoex], Wifi connected + BT non connected-idle!!\n"); 1394 "[BTCoex], Wifi connected + BT non connected-idle!!\n");
1406 halbtc8723b1ant_sw_mechanism(btcoexist, false); 1395 halbtc8723b1ant_sw_mechanism(btcoexist, false);
1407 commom = true; 1396 commom = true;
1408 } else if (!wifi_connected && 1397 } else if (!wifi_connected &&
1409 (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE == 1398 (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE ==
1410 coex_dm->bt_status)) { 1399 coex_dm->bt_status)) {
1411 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1400 btc_alg_dbg(ALGO_TRACE,
1412 "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n"); 1401 "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n");
1413 halbtc8723b1ant_sw_mechanism(btcoexist, false); 1402 halbtc8723b1ant_sw_mechanism(btcoexist, false);
1414 commom = true; 1403 commom = true;
1415 } else if (wifi_connected && 1404 } else if (wifi_connected &&
1416 (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE == 1405 (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE ==
1417 coex_dm->bt_status)) { 1406 coex_dm->bt_status)) {
1418 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1407 btc_alg_dbg(ALGO_TRACE,
1419 "[BTCoex], Wifi connected + BT connected-idle!!\n"); 1408 "[BTCoex], Wifi connected + BT connected-idle!!\n");
1420 halbtc8723b1ant_sw_mechanism(btcoexist, false); 1409 halbtc8723b1ant_sw_mechanism(btcoexist, false);
1421 commom = true; 1410 commom = true;
1422 } else if (!wifi_connected && 1411 } else if (!wifi_connected &&
1423 (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE != 1412 (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE !=
1424 coex_dm->bt_status)) { 1413 coex_dm->bt_status)) {
1425 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1414 btc_alg_dbg(ALGO_TRACE,
1426 ("[BTCoex], Wifi non connected-idle + BT Busy!!\n")); 1415 "[BTCoex], Wifi non connected-idle + BT Busy!!\n");
1427 halbtc8723b1ant_sw_mechanism(btcoexist, false); 1416 halbtc8723b1ant_sw_mechanism(btcoexist, false);
1428 commom = true; 1417 commom = true;
1429 } else { 1418 } else {
1430 if (wifi_busy) 1419 if (wifi_busy)
1431 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1420 btc_alg_dbg(ALGO_TRACE,
1432 "[BTCoex], Wifi Connected-Busy + BT Busy!!\n"); 1421 "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
1433 else 1422 else
1434 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1423 btc_alg_dbg(ALGO_TRACE,
1435 "[BTCoex], Wifi Connected-Idle + BT Busy!!\n"); 1424 "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
1436 1425
1437 commom = false; 1426 commom = false;
1438 } 1427 }
@@ -1451,8 +1440,8 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
1451 u8 retry_count = 0, bt_info_ext; 1440 u8 retry_count = 0, bt_info_ext;
1452 bool wifi_busy = false; 1441 bool wifi_busy = false;
1453 1442
1454 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, 1443 btc_alg_dbg(ALGO_TRACE_FW,
1455 "[BTCoex], TdmaDurationAdjustForAcl()\n"); 1444 "[BTCoex], TdmaDurationAdjustForAcl()\n");
1456 1445
1457 if (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY == wifi_status) 1446 if (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY == wifi_status)
1458 wifi_busy = true; 1447 wifi_busy = true;
@@ -1481,8 +1470,8 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
1481 1470
1482 if (!coex_dm->auto_tdma_adjust) { 1471 if (!coex_dm->auto_tdma_adjust) {
1483 coex_dm->auto_tdma_adjust = true; 1472 coex_dm->auto_tdma_adjust = true;
1484 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 1473 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
1485 "[BTCoex], first run TdmaDurationAdjust()!!\n"); 1474 "[BTCoex], first run TdmaDurationAdjust()!!\n");
1486 1475
1487 halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2); 1476 halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
1488 coex_dm->tdma_adj_type = 2; 1477 coex_dm->tdma_adj_type = 2;
@@ -1513,9 +1502,8 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
1513 up = 0; 1502 up = 0;
1514 dn = 0; 1503 dn = 0;
1515 result = 1; 1504 result = 1;
1516 BTC_PRINT(BTC_MSG_ALGORITHM, 1505 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
1517 ALGO_TRACE_FW_DETAIL, 1506 "[BTCoex], Increase wifi duration!!\n");
1518 "[BTCoex], Increase wifi duration!!\n");
1519 } 1507 }
1520 } else if (retry_count <= 3) { 1508 } else if (retry_count <= 3) {
1521 up--; 1509 up--;
@@ -1538,9 +1526,8 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
1538 dn = 0; 1526 dn = 0;
1539 wait_count = 0; 1527 wait_count = 0;
1540 result = -1; 1528 result = -1;
1541 BTC_PRINT(BTC_MSG_ALGORITHM, 1529 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
1542 ALGO_TRACE_FW_DETAIL, 1530 "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
1543 "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
1544 } 1531 }
1545 } else { 1532 } else {
1546 if (wait_count == 1) 1533 if (wait_count == 1)
@@ -1556,8 +1543,8 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
1556 dn = 0; 1543 dn = 0;
1557 wait_count = 0; 1544 wait_count = 0;
1558 result = -1; 1545 result = -1;
1559 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 1546 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
1560 "[BTCoex], Decrease wifi duration for retryCounter>3!!\n"); 1547 "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
1561 } 1548 }
1562 1549
1563 if (result == -1) { 1550 if (result == -1) {
@@ -1602,9 +1589,9 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
1602 } 1589 }
1603 } else { /*no change */ 1590 } else { /*no change */
1604 /*if busy / idle change */ 1591 /*if busy / idle change */
1605 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 1592 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
1606 "[BTCoex],********* TDMA(on, %d) ********\n", 1593 "[BTCoex],********* TDMA(on, %d) ********\n",
1607 coex_dm->cur_ps_tdma); 1594 coex_dm->cur_ps_tdma);
1608 } 1595 }
1609 1596
1610 if (coex_dm->cur_ps_tdma != 1 && coex_dm->cur_ps_tdma != 2 && 1597 if (coex_dm->cur_ps_tdma != 1 && coex_dm->cur_ps_tdma != 2 &&
@@ -2010,15 +1997,15 @@ static void halbtc8723b1ant_action_wifi_connected(struct btc_coexist *btcoexist)
2010 bool scan = false, link = false, roam = false; 1997 bool scan = false, link = false, roam = false;
2011 bool under_4way = false, ap_enable = false; 1998 bool under_4way = false, ap_enable = false;
2012 1999
2013 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2000 btc_alg_dbg(ALGO_TRACE,
2014 "[BTCoex], CoexForWifiConnect()===>\n"); 2001 "[BTCoex], CoexForWifiConnect()===>\n");
2015 2002
2016 btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS, 2003 btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
2017 &under_4way); 2004 &under_4way);
2018 if (under_4way) { 2005 if (under_4way) {
2019 halbtc8723b1ant_action_wifi_connected_special_packet(btcoexist); 2006 halbtc8723b1ant_action_wifi_connected_special_packet(btcoexist);
2020 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2007 btc_alg_dbg(ALGO_TRACE,
2021 "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n"); 2008 "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n");
2022 return; 2009 return;
2023 } 2010 }
2024 2011
@@ -2032,8 +2019,8 @@ static void halbtc8723b1ant_action_wifi_connected(struct btc_coexist *btcoexist)
2032 else 2019 else
2033 halbtc8723b1ant_action_wifi_connected_special_packet( 2020 halbtc8723b1ant_action_wifi_connected_special_packet(
2034 btcoexist); 2021 btcoexist);
2035 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2022 btc_alg_dbg(ALGO_TRACE,
2036 "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n"); 2023 "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n");
2037 return; 2024 return;
2038 } 2025 }
2039 2026
@@ -2102,58 +2089,58 @@ static void btc8723b1ant_run_sw_coex_mech(struct btc_coexist *btcoexist)
2102 if (!halbtc8723b1ant_is_common_action(btcoexist)) { 2089 if (!halbtc8723b1ant_is_common_action(btcoexist)) {
2103 switch (coex_dm->cur_algorithm) { 2090 switch (coex_dm->cur_algorithm) {
2104 case BT_8723B_1ANT_COEX_ALGO_SCO: 2091 case BT_8723B_1ANT_COEX_ALGO_SCO:
2105 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2092 btc_alg_dbg(ALGO_TRACE,
2106 "[BTCoex], Action algorithm = SCO.\n"); 2093 "[BTCoex], Action algorithm = SCO\n");
2107 halbtc8723b1ant_action_sco(btcoexist); 2094 halbtc8723b1ant_action_sco(btcoexist);
2108 break; 2095 break;
2109 case BT_8723B_1ANT_COEX_ALGO_HID: 2096 case BT_8723B_1ANT_COEX_ALGO_HID:
2110 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2097 btc_alg_dbg(ALGO_TRACE,
2111 "[BTCoex], Action algorithm = HID.\n"); 2098 "[BTCoex], Action algorithm = HID\n");
2112 halbtc8723b1ant_action_hid(btcoexist); 2099 halbtc8723b1ant_action_hid(btcoexist);
2113 break; 2100 break;
2114 case BT_8723B_1ANT_COEX_ALGO_A2DP: 2101 case BT_8723B_1ANT_COEX_ALGO_A2DP:
2115 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2102 btc_alg_dbg(ALGO_TRACE,
2116 "[BTCoex], Action algorithm = A2DP.\n"); 2103 "[BTCoex], Action algorithm = A2DP\n");
2117 halbtc8723b1ant_action_a2dp(btcoexist); 2104 halbtc8723b1ant_action_a2dp(btcoexist);
2118 break; 2105 break;
2119 case BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS: 2106 case BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS:
2120 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2107 btc_alg_dbg(ALGO_TRACE,
2121 "[BTCoex], Action algorithm = A2DP+PAN(HS).\n"); 2108 "[BTCoex], Action algorithm = A2DP+PAN(HS)\n");
2122 halbtc8723b1ant_action_a2dp_pan_hs(btcoexist); 2109 halbtc8723b1ant_action_a2dp_pan_hs(btcoexist);
2123 break; 2110 break;
2124 case BT_8723B_1ANT_COEX_ALGO_PANEDR: 2111 case BT_8723B_1ANT_COEX_ALGO_PANEDR:
2125 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2112 btc_alg_dbg(ALGO_TRACE,
2126 "[BTCoex], Action algorithm = PAN(EDR).\n"); 2113 "[BTCoex], Action algorithm = PAN(EDR)\n");
2127 halbtc8723b1ant_action_pan_edr(btcoexist); 2114 halbtc8723b1ant_action_pan_edr(btcoexist);
2128 break; 2115 break;
2129 case BT_8723B_1ANT_COEX_ALGO_PANHS: 2116 case BT_8723B_1ANT_COEX_ALGO_PANHS:
2130 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2117 btc_alg_dbg(ALGO_TRACE,
2131 "[BTCoex], Action algorithm = HS mode.\n"); 2118 "[BTCoex], Action algorithm = HS mode\n");
2132 halbtc8723b1ant_action_pan_hs(btcoexist); 2119 halbtc8723b1ant_action_pan_hs(btcoexist);
2133 break; 2120 break;
2134 case BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP: 2121 case BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP:
2135 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2122 btc_alg_dbg(ALGO_TRACE,
2136 "[BTCoex], Action algorithm = PAN+A2DP.\n"); 2123 "[BTCoex], Action algorithm = PAN+A2DP\n");
2137 halbtc8723b1ant_action_pan_edr_a2dp(btcoexist); 2124 halbtc8723b1ant_action_pan_edr_a2dp(btcoexist);
2138 break; 2125 break;
2139 case BT_8723B_1ANT_COEX_ALGO_PANEDR_HID: 2126 case BT_8723B_1ANT_COEX_ALGO_PANEDR_HID:
2140 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2127 btc_alg_dbg(ALGO_TRACE,
2141 "[BTCoex], Action algorithm = PAN(EDR)+HID.\n"); 2128 "[BTCoex], Action algorithm = PAN(EDR)+HID\n");
2142 halbtc8723b1ant_action_pan_edr_hid(btcoexist); 2129 halbtc8723b1ant_action_pan_edr_hid(btcoexist);
2143 break; 2130 break;
2144 case BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR: 2131 case BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR:
2145 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2132 btc_alg_dbg(ALGO_TRACE,
2146 "[BTCoex], Action algorithm = HID+A2DP+PAN.\n"); 2133 "[BTCoex], Action algorithm = HID+A2DP+PAN\n");
2147 btc8723b1ant_action_hid_a2dp_pan_edr(btcoexist); 2134 btc8723b1ant_action_hid_a2dp_pan_edr(btcoexist);
2148 break; 2135 break;
2149 case BT_8723B_1ANT_COEX_ALGO_HID_A2DP: 2136 case BT_8723B_1ANT_COEX_ALGO_HID_A2DP:
2150 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2137 btc_alg_dbg(ALGO_TRACE,
2151 "[BTCoex], Action algorithm = HID+A2DP.\n"); 2138 "[BTCoex], Action algorithm = HID+A2DP\n");
2152 halbtc8723b1ant_action_hid_a2dp(btcoexist); 2139 halbtc8723b1ant_action_hid_a2dp(btcoexist);
2153 break; 2140 break;
2154 default: 2141 default:
2155 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2142 btc_alg_dbg(ALGO_TRACE,
2156 "[BTCoex], Action algorithm = coexist All Off!!\n"); 2143 "[BTCoex], Action algorithm = coexist All Off!!\n");
2157 break; 2144 break;
2158 } 2145 }
2159 coex_dm->pre_algorithm = coex_dm->cur_algorithm; 2146 coex_dm->pre_algorithm = coex_dm->cur_algorithm;
@@ -2171,24 +2158,24 @@ static void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
2171 u32 wifi_link_status = 0; 2158 u32 wifi_link_status = 0;
2172 u32 num_of_wifi_link = 0; 2159 u32 num_of_wifi_link = 0;
2173 2160
2174 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2161 btc_alg_dbg(ALGO_TRACE,
2175 "[BTCoex], RunCoexistMechanism()===>\n"); 2162 "[BTCoex], RunCoexistMechanism()===>\n");
2176 2163
2177 if (btcoexist->manual_control) { 2164 if (btcoexist->manual_control) {
2178 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2165 btc_alg_dbg(ALGO_TRACE,
2179 "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n"); 2166 "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
2180 return; 2167 return;
2181 } 2168 }
2182 2169
2183 if (btcoexist->stop_coex_dm) { 2170 if (btcoexist->stop_coex_dm) {
2184 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2171 btc_alg_dbg(ALGO_TRACE,
2185 "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n"); 2172 "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n");
2186 return; 2173 return;
2187 } 2174 }
2188 2175
2189 if (coex_sta->under_ips) { 2176 if (coex_sta->under_ips) {
2190 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2177 btc_alg_dbg(ALGO_TRACE,
2191 "[BTCoex], wifi is under IPS !!!\n"); 2178 "[BTCoex], wifi is under IPS !!!\n");
2192 return; 2179 return;
2193 } 2180 }
2194 2181
@@ -2267,8 +2254,8 @@ static void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
2267 if (!wifi_connected) { 2254 if (!wifi_connected) {
2268 bool scan = false, link = false, roam = false; 2255 bool scan = false, link = false, roam = false;
2269 2256
2270 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2257 btc_alg_dbg(ALGO_TRACE,
2271 "[BTCoex], wifi is non connected-idle !!!\n"); 2258 "[BTCoex], wifi is non connected-idle !!!\n");
2272 2259
2273 btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); 2260 btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
2274 btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); 2261 btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2305,8 +2292,8 @@ static void halbtc8723b1ant_init_hw_config(struct btc_coexist *btcoexist,
2305 u8 u8tmp = 0; 2292 u8 u8tmp = 0;
2306 u32 cnt_bt_cal_chk = 0; 2293 u32 cnt_bt_cal_chk = 0;
2307 2294
2308 BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, 2295 btc_iface_dbg(INTF_INIT,
2309 "[BTCoex], 1Ant Init HW Config!!\n"); 2296 "[BTCoex], 1Ant Init HW Config!!\n");
2310 2297
2311 if (backup) {/* backup rf 0x1e value */ 2298 if (backup) {/* backup rf 0x1e value */
2312 coex_dm->backup_arfr_cnt1 = 2299 coex_dm->backup_arfr_cnt1 =
@@ -2333,14 +2320,14 @@ static void halbtc8723b1ant_init_hw_config(struct btc_coexist *btcoexist,
2333 u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x49d); 2320 u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x49d);
2334 cnt_bt_cal_chk++; 2321 cnt_bt_cal_chk++;
2335 if (u32tmp & BIT0) { 2322 if (u32tmp & BIT0) {
2336 BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, 2323 btc_iface_dbg(INTF_INIT,
2337 "[BTCoex], ########### BT calibration(cnt=%d) ###########\n", 2324 "[BTCoex], ########### BT calibration(cnt=%d) ###########\n",
2338 cnt_bt_cal_chk); 2325 cnt_bt_cal_chk);
2339 mdelay(50); 2326 mdelay(50);
2340 } else { 2327 } else {
2341 BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, 2328 btc_iface_dbg(INTF_INIT,
2342 "[BTCoex], ********** BT NOT calibration (cnt=%d)**********\n", 2329 "[BTCoex], ********** BT NOT calibration (cnt=%d)**********\n",
2343 cnt_bt_cal_chk); 2330 cnt_bt_cal_chk);
2344 break; 2331 break;
2345 } 2332 }
2346 } 2333 }
@@ -2383,8 +2370,8 @@ void ex_halbtc8723b1ant_init_hwconfig(struct btc_coexist *btcoexist)
2383 2370
2384void ex_halbtc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist) 2371void ex_halbtc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist)
2385{ 2372{
2386 BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, 2373 btc_iface_dbg(INTF_INIT,
2387 "[BTCoex], Coex Mechanism Init!!\n"); 2374 "[BTCoex], Coex Mechanism Init!!\n");
2388 2375
2389 btcoexist->stop_coex_dm = false; 2376 btcoexist->stop_coex_dm = false;
2390 2377
@@ -2677,8 +2664,8 @@ void ex_halbtc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
2677 return; 2664 return;
2678 2665
2679 if (BTC_IPS_ENTER == type) { 2666 if (BTC_IPS_ENTER == type) {
2680 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 2667 btc_iface_dbg(INTF_NOTIFY,
2681 "[BTCoex], IPS ENTER notify\n"); 2668 "[BTCoex], IPS ENTER notify\n");
2682 coex_sta->under_ips = true; 2669 coex_sta->under_ips = true;
2683 2670
2684 halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT, 2671 halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT,
@@ -2689,8 +2676,8 @@ void ex_halbtc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
2689 NORMAL_EXEC, 0); 2676 NORMAL_EXEC, 0);
2690 halbtc8723b1ant_wifi_off_hw_cfg(btcoexist); 2677 halbtc8723b1ant_wifi_off_hw_cfg(btcoexist);
2691 } else if (BTC_IPS_LEAVE == type) { 2678 } else if (BTC_IPS_LEAVE == type) {
2692 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 2679 btc_iface_dbg(INTF_NOTIFY,
2693 "[BTCoex], IPS LEAVE notify\n"); 2680 "[BTCoex], IPS LEAVE notify\n");
2694 coex_sta->under_ips = false; 2681 coex_sta->under_ips = false;
2695 2682
2696 halbtc8723b1ant_init_hw_config(btcoexist, false); 2683 halbtc8723b1ant_init_hw_config(btcoexist, false);
@@ -2705,12 +2692,12 @@ void ex_halbtc8723b1ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
2705 return; 2692 return;
2706 2693
2707 if (BTC_LPS_ENABLE == type) { 2694 if (BTC_LPS_ENABLE == type) {
2708 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 2695 btc_iface_dbg(INTF_NOTIFY,
2709 "[BTCoex], LPS ENABLE notify\n"); 2696 "[BTCoex], LPS ENABLE notify\n");
2710 coex_sta->under_lps = true; 2697 coex_sta->under_lps = true;
2711 } else if (BTC_LPS_DISABLE == type) { 2698 } else if (BTC_LPS_DISABLE == type) {
2712 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 2699 btc_iface_dbg(INTF_NOTIFY,
2713 "[BTCoex], LPS DISABLE notify\n"); 2700 "[BTCoex], LPS DISABLE notify\n");
2714 coex_sta->under_lps = false; 2701 coex_sta->under_lps = false;
2715 } 2702 }
2716} 2703}
@@ -2753,15 +2740,15 @@ void ex_halbtc8723b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
 	}
 
 	if (BTC_SCAN_START == type) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], SCAN START notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], SCAN START notify\n");
 		if (!wifi_connected) /* non-connected scan */
 			btc8723b1ant_action_wifi_not_conn_scan(btcoexist);
 		else /* wifi is connected */
 			btc8723b1ant_action_wifi_conn_scan(btcoexist);
 	} else if (BTC_SCAN_FINISH == type) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], SCAN FINISH notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], SCAN FINISH notify\n");
 		if (!wifi_connected) /* non-connected scan */
 			btc8723b1ant_action_wifi_not_conn(btcoexist);
 		else
@@ -2802,12 +2789,12 @@ void ex_halbtc8723b1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
 	}
 
 	if (BTC_ASSOCIATE_START == type) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], CONNECT START notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], CONNECT START notify\n");
 		btc8723b1ant_act_wifi_not_conn_asso_auth(btcoexist);
 	} else if (BTC_ASSOCIATE_FINISH == type) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], CONNECT FINISH notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], CONNECT FINISH notify\n");
 
 		btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
 				   &wifi_connected);
@@ -2830,11 +2817,11 @@ void ex_halbtc8723b1ant_media_status_notify(struct btc_coexist *btcoexist,
 		return;
 
 	if (BTC_MEDIA_CONNECT == type)
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], MEDIA connect notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], MEDIA connect notify\n");
 	else
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], MEDIA disconnect notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], MEDIA disconnect notify\n");
 
 	/* only 2.4G we need to inform bt the chnl mask */
 	btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL,
@@ -2855,10 +2842,10 @@ void ex_halbtc8723b1ant_media_status_notify(struct btc_coexist *btcoexist,
 	coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
 	coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], FW write 0x66 = 0x%x\n",
-		  h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
-		  h2c_parameter[2]);
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], FW write 0x66 = 0x%x\n",
+		    h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
+		    h2c_parameter[2]);
 
 	btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
 }
@@ -2900,8 +2887,8 @@ void ex_halbtc8723b1ant_special_packet_notify(struct btc_coexist *btcoexist,
 
 	if (BTC_PACKET_DHCP == type ||
 	    BTC_PACKET_EAPOL == type) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], special Packet(%d) notify\n", type);
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], special Packet(%d) notify\n", type);
 		halbtc8723b1ant_action_wifi_connected_special_packet(btcoexist);
 	}
 }
@@ -2921,19 +2908,19 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
 		rsp_source = BT_INFO_SRC_8723B_1ANT_WIFI_FW;
 	coex_sta->bt_info_c2h_cnt[rsp_source]++;
 
-	BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-		  "[BTCoex], Bt info[%d], length=%d, hex data = [",
-		  rsp_source, length);
+	btc_iface_dbg(INTF_NOTIFY,
+		      "[BTCoex], Bt info[%d], length=%d, hex data = [",
+		      rsp_source, length);
 	for (i = 0; i < length; i++) {
 		coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
 		if (i == 1)
 			bt_info = tmp_buf[i];
 		if (i == length - 1)
-			BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-				  "0x%02x]\n", tmp_buf[i]);
+			btc_iface_dbg(INTF_NOTIFY,
+				      "0x%02x]\n", tmp_buf[i]);
 		else
-			BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-				  "0x%02x, ", tmp_buf[i]);
+			btc_iface_dbg(INTF_NOTIFY,
+				      "0x%02x, ", tmp_buf[i]);
 	}
 
 	if (BT_INFO_SRC_8723B_1ANT_WIFI_FW != rsp_source) {
@@ -2950,8 +2937,8 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
 		 * because bt is reset and loss of the info.
 		 */
 		if (coex_sta->bt_info_ext & BIT1) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
 			btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
 					   &wifi_connected);
 			if (wifi_connected)
@@ -2965,8 +2952,8 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
 		if (coex_sta->bt_info_ext & BIT3) {
 			if (!btcoexist->manual_control &&
 			    !btcoexist->stop_coex_dm) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "[BTCoex], BT ext info bit3 check, set BT NOT ignore Wlan active!!\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "[BTCoex], BT ext info bit3 check, set BT NOT ignore Wlan active!!\n");
 				halbtc8723b1ant_ignore_wlan_act(btcoexist,
 								FORCE_EXEC,
 								false);
@@ -3021,30 +3008,30 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
 
 	if (!(bt_info&BT_INFO_8723B_1ANT_B_CONNECTION)) {
 		coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE;
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], BtInfoNotify(), BT Non-Connected idle!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], BtInfoNotify(), BT Non-Connected idle!\n");
 	/* connection exists but no busy */
 	} else if (bt_info == BT_INFO_8723B_1ANT_B_CONNECTION) {
 		coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE;
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
 	} else if ((bt_info & BT_INFO_8723B_1ANT_B_SCO_ESCO) ||
 		   (bt_info & BT_INFO_8723B_1ANT_B_SCO_BUSY)) {
 		coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_SCO_BUSY;
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
 	} else if (bt_info & BT_INFO_8723B_1ANT_B_ACL_BUSY) {
 		if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY != coex_dm->bt_status)
 			coex_dm->auto_tdma_adjust = false;
 
 		coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_ACL_BUSY;
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
 	} else {
 		coex_dm->bt_status =
 			BT_8723B_1ANT_BT_STATUS_MAX;
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], BtInfoNotify(), BT Non-Defined state!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], BtInfoNotify(), BT Non-Defined state!!\n");
 	}
 
 	if ((BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
@@ -3060,7 +3047,7 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
 
 void ex_halbtc8723b1ant_halt_notify(struct btc_coexist *btcoexist)
 {
-	BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Halt notify\n");
+	btc_iface_dbg(INTF_NOTIFY, "[BTCoex], Halt notify\n");
 
 	btcoexist->stop_coex_dm = true;
 
@@ -3078,11 +3065,11 @@ void ex_halbtc8723b1ant_halt_notify(struct btc_coexist *btcoexist)
 
 void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
 {
-	BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Pnp notify\n");
+	btc_iface_dbg(INTF_NOTIFY, "[BTCoex], Pnp notify\n");
 
 	if (BTC_WIFI_PNP_SLEEP == pnp_state) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], Pnp notify to SLEEP\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], Pnp notify to SLEEP\n");
 		btcoexist->stop_coex_dm = true;
 		halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT, false,
 					   true);
@@ -3092,8 +3079,8 @@ void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
 		halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
 		halbtc8723b1ant_wifi_off_hw_cfg(btcoexist);
 	} else if (BTC_WIFI_PNP_WAKE_UP == pnp_state) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], Pnp notify to WAKE UP\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], Pnp notify to WAKE UP\n");
 		btcoexist->stop_coex_dm = false;
 		halbtc8723b1ant_init_hw_config(btcoexist, false);
 		halbtc8723b1ant_init_coex_dm(btcoexist);
@@ -3103,8 +3090,8 @@ void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
 
 void ex_halbtc8723b1ant_coex_dm_reset(struct btc_coexist *btcoexist)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-		  "[BTCoex], *****************Coex DM Reset****************\n");
+	btc_alg_dbg(ALGO_TRACE,
+		    "[BTCoex], *****************Coex DM Reset****************\n");
 
 	halbtc8723b1ant_init_hw_config(btcoexist, false);
 	btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
@@ -3119,31 +3106,31 @@ void ex_halbtc8723b1ant_periodical(struct btc_coexist *btcoexist)
 	static u8 dis_ver_info_cnt;
 	u32 fw_ver = 0, bt_patch_ver = 0;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-		  "[BTCoex], ==========================Periodical===========================\n");
+	btc_alg_dbg(ALGO_TRACE,
+		    "[BTCoex], ==========================Periodical===========================\n");
 
 	if (dis_ver_info_cnt <= 5) {
 		dis_ver_info_cnt += 1;
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-			  "[BTCoex], ****************************************************************\n");
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-			  "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
-			  board_info->pg_ant_num, board_info->btdm_ant_num,
-			  board_info->btdm_ant_pos);
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-			  "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
-			  ((stack_info->profile_notified) ? "Yes" : "No"),
-			  stack_info->hci_version);
+		btc_iface_dbg(INTF_INIT,
+			      "[BTCoex], ****************************************************************\n");
+		btc_iface_dbg(INTF_INIT,
+			      "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
+			      board_info->pg_ant_num, board_info->btdm_ant_num,
+			      board_info->btdm_ant_pos);
+		btc_iface_dbg(INTF_INIT,
+			      "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
+			      stack_info->profile_notified ? "Yes" : "No",
+			      stack_info->hci_version);
 		btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
 				   &bt_patch_ver);
 		btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-			  "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
-			  glcoex_ver_date_8723b_1ant,
-			  glcoex_ver_8723b_1ant, fw_ver,
-			  bt_patch_ver, bt_patch_ver);
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-			  "[BTCoex], ****************************************************************\n");
+		btc_iface_dbg(INTF_INIT,
+			      "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
+			      glcoex_ver_date_8723b_1ant,
+			      glcoex_ver_8723b_1ant, fw_ver,
+			      bt_patch_ver, bt_patch_ver);
+		btc_iface_dbg(INTF_INIT,
+			      "[BTCoex], ****************************************************************\n");
 	}
 
 #if (BT_AUTO_REPORT_ONLY_8723B_1ANT == 0)
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
index c43ab59a690a..5f488ecaef70 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
@@ -72,32 +72,28 @@ static u8 btc8723b2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
 			if (bt_rssi >= rssi_thresh +
 				       BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
 				bt_rssi_state = BTC_RSSI_STATE_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state "
-					  "switch to High\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state switch to High\n");
 			} else {
 				bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state "
-					  "stay at Low\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state stay at Low\n");
 			}
 		} else {
 			if (bt_rssi < rssi_thresh) {
 				bt_rssi_state = BTC_RSSI_STATE_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state "
-					  "switch to Low\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state switch to Low\n");
 			} else {
 				bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state "
-					  "stay at High\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state stay at High\n");
 			}
 		}
 	} else if (level_num == 3) {
 		if (rssi_thresh > rssi_thresh1) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-				  "[BTCoex], BT Rssi thresh error!!\n");
+			btc_alg_dbg(ALGO_BT_RSSI_STATE,
+				    "[BTCoex], BT Rssi thresh error!!\n");
 			return coex_sta->pre_bt_rssi_state;
 		}
 
@@ -106,14 +102,12 @@ static u8 btc8723b2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
 			if (bt_rssi >= rssi_thresh +
 				       BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
 				bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state "
-					  "switch to Medium\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state switch to Medium\n");
 			} else {
 				bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state "
-					  "stay at Low\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state stay at Low\n");
 			}
 		} else if ((coex_sta->pre_bt_rssi_state ==
 			    BTC_RSSI_STATE_MEDIUM) ||
@@ -122,31 +116,26 @@ static u8 btc8723b2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
 			if (bt_rssi >= rssi_thresh1 +
 				       BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
 				bt_rssi_state = BTC_RSSI_STATE_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state "
-					  "switch to High\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state switch to High\n");
 			} else if (bt_rssi < rssi_thresh) {
 				bt_rssi_state = BTC_RSSI_STATE_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state "
-					  "switch to Low\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state switch to Low\n");
 			} else {
 				bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state "
-					  "stay at Medium\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state stay at Medium\n");
 			}
 		} else {
 			if (bt_rssi < rssi_thresh1) {
 				bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state "
-					  "switch to Medium\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state switch to Medium\n");
 			} else {
 				bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state "
-					  "stay at High\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state stay at High\n");
 			}
 		}
 	}
@@ -173,36 +162,28 @@ static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
 			if (wifi_rssi >= rssi_thresh +
 					 BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
 				wifi_rssi_state = BTC_RSSI_STATE_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state "
-					  "switch to High\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state switch to High\n");
 			} else {
 				wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state "
-					  "stay at Low\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state stay at Low\n");
 			}
 		} else {
 			if (wifi_rssi < rssi_thresh) {
 				wifi_rssi_state = BTC_RSSI_STATE_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state "
-					  "switch to Low\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state switch to Low\n");
 			} else {
 				wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state "
-					  "stay at High\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state stay at High\n");
 			}
 		}
 	} else if (level_num == 3) {
 		if (rssi_thresh > rssi_thresh1) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE,
-				  "[BTCoex], wifi RSSI thresh error!!\n");
+			btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+				    "[BTCoex], wifi RSSI thresh error!!\n");
 			return coex_sta->pre_wifi_rssi_state[index];
 		}
 
@@ -213,16 +194,12 @@ static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
 			if (wifi_rssi >= rssi_thresh +
 					 BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
 				wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state "
-					  "switch to Medium\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state switch to Medium\n");
 			} else {
 				wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state "
-					  "stay at Low\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state stay at Low\n");
 			}
 		} else if ((coex_sta->pre_wifi_rssi_state[index] ==
 			    BTC_RSSI_STATE_MEDIUM) ||
@@ -231,36 +208,26 @@ static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
 			if (wifi_rssi >= rssi_thresh1 +
 					 BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
 				wifi_rssi_state = BTC_RSSI_STATE_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state "
-					  "switch to High\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state switch to High\n");
 			} else if (wifi_rssi < rssi_thresh) {
 				wifi_rssi_state = BTC_RSSI_STATE_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state "
-					  "switch to Low\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state switch to Low\n");
 			} else {
 				wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state "
-					  "stay at Medium\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state stay at Medium\n");
 			}
 		} else {
 			if (wifi_rssi < rssi_thresh1) {
 				wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state "
-					  "switch to Medium\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state switch to Medium\n");
 			} else {
 				wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state "
-					  "stay at High\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state stay at High\n");
 			}
 		}
 	}
@@ -292,12 +259,12 @@ static void btc8723b2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
 	coex_sta->low_priority_tx = reg_lp_tx;
 	coex_sta->low_priority_rx = reg_lp_rx;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-		  "[BTCoex], High Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
-		  reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-		  "[BTCoex], Low Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
-		  reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
+	btc_alg_dbg(ALGO_BT_MONITOR,
+		    "[BTCoex], High Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+		    reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
+	btc_alg_dbg(ALGO_BT_MONITOR,
+		    "[BTCoex], Low Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+		    reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
 
 	/* reset counter */
 	btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
@@ -311,9 +278,9 @@ static void btc8723b2ant_query_bt_info(struct btc_coexist *btcoexist)
 
 	h2c_parameter[0] |= BIT0; /* trigger */
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
-		  h2c_parameter[0]);
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
+		    h2c_parameter[0]);
 
 	btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
 }
@@ -427,8 +394,8 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
 	btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
 
 	if (!bt_link_info->bt_link_exist) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], No BT link exists!!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], No BT link exists!!!\n");
 		return algorithm;
 	}
 
@@ -443,27 +410,27 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
 
 	if (num_of_diff_profile == 1) {
 		if (bt_link_info->sco_exist) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], SCO only\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], SCO only\n");
 			algorithm = BT_8723B_2ANT_COEX_ALGO_SCO;
 		} else {
 			if (bt_link_info->hid_exist) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "[BTCoex], HID only\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "[BTCoex], HID only\n");
 				algorithm = BT_8723B_2ANT_COEX_ALGO_HID;
 			} else if (bt_link_info->a2dp_exist) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "[BTCoex], A2DP only\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "[BTCoex], A2DP only\n");
 				algorithm = BT_8723B_2ANT_COEX_ALGO_A2DP;
 			} else if (bt_link_info->pan_exist) {
 				if (bt_hs_on) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], PAN(HS) only\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], PAN(HS) only\n");
 					algorithm =
 						BT_8723B_2ANT_COEX_ALGO_PANHS;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], PAN(EDR) only\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], PAN(EDR) only\n");
 					algorithm =
 						BT_8723B_2ANT_COEX_ALGO_PANEDR;
 				}
@@ -472,21 +439,21 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
 	} else if (num_of_diff_profile == 2) {
 		if (bt_link_info->sco_exist) {
 			if (bt_link_info->hid_exist) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "[BTCoex], SCO + HID\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "[BTCoex], SCO + HID\n");
 				algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
 			} else if (bt_link_info->a2dp_exist) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "[BTCoex], SCO + A2DP ==> SCO\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "[BTCoex], SCO + A2DP ==> SCO\n");
 				algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
 			} else if (bt_link_info->pan_exist) {
 				if (bt_hs_on) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], SCO + PAN(HS)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], SCO + PAN(HS)\n");
 					algorithm = BT_8723B_2ANT_COEX_ALGO_SCO;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], SCO + PAN(EDR)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], SCO + PAN(EDR)\n");
 					algorithm =
 						BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
 				}
@@ -494,31 +461,31 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
 		} else {
 			if (bt_link_info->hid_exist &&
 			    bt_link_info->a2dp_exist) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "[BTCoex], HID + A2DP\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "[BTCoex], HID + A2DP\n");
 				algorithm = BT_8723B_2ANT_COEX_ALGO_HID_A2DP;
 			} else if (bt_link_info->hid_exist &&
 				   bt_link_info->pan_exist) {
 				if (bt_hs_on) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], HID + PAN(HS)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], HID + PAN(HS)\n");
 					algorithm = BT_8723B_2ANT_COEX_ALGO_HID;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], HID + PAN(EDR)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], HID + PAN(EDR)\n");
 					algorithm =
 						BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
 				}
 			} else if (bt_link_info->pan_exist &&
 				   bt_link_info->a2dp_exist) {
 				if (bt_hs_on) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], A2DP + PAN(HS)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], A2DP + PAN(HS)\n");
 					algorithm =
 						BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex],A2DP + PAN(EDR)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex],A2DP + PAN(EDR)\n");
 					algorithm =
 						BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP;
 				}
@@ -528,37 +495,32 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
 		if (bt_link_info->sco_exist) {
 			if (bt_link_info->hid_exist &&
 			    bt_link_info->a2dp_exist) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "[BTCoex], SCO + HID + A2DP"
-					  " ==> HID\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "[BTCoex], SCO + HID + A2DP ==> HID\n");
 				algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
 			} else if (bt_link_info->hid_exist &&
 				   bt_link_info->pan_exist) {
 				if (bt_hs_on) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], SCO + HID + "
-						  "PAN(HS)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], SCO + HID + PAN(HS)\n");
 					algorithm =
 						BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], SCO + HID + "
-						  "PAN(EDR)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], SCO + HID + PAN(EDR)\n");
 					algorithm =
 						BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
 				}
 			} else if (bt_link_info->pan_exist &&
 				   bt_link_info->a2dp_exist) {
 				if (bt_hs_on) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], SCO + A2DP + "
-						  "PAN(HS)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], SCO + A2DP + PAN(HS)\n");
 					algorithm =
 						BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], SCO + A2DP + "
-						  "PAN(EDR) ==> HID\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n");
 					algorithm =
 						BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
 				}
@@ -568,15 +530,13 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
 				   bt_link_info->pan_exist &&
 				   bt_link_info->a2dp_exist) {
 				if (bt_hs_on) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], HID + A2DP + "
-						  "PAN(HS)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], HID + A2DP + PAN(HS)\n");
 					algorithm =
 						BT_8723B_2ANT_COEX_ALGO_HID_A2DP;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], HID + A2DP + "
-						  "PAN(EDR)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], HID + A2DP + PAN(EDR)\n");
 					algorithm =
 						BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
 				}
@@ -588,13 +548,11 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
 		    bt_link_info->pan_exist &&
 		    bt_link_info->a2dp_exist) {
 			if (bt_hs_on) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "[BTCoex], Error!!! SCO + HID"
-					  " + A2DP + PAN(HS)\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n");
 			} else {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "[BTCoex], SCO + HID + A2DP +"
-					  " PAN(EDR)==>PAN(EDR)+HID\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
 				algorithm =
 					BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
 			}
@@ -624,17 +582,15 @@ static bool btc8723b_need_dec_pwr(struct btc_coexist *btcoexist)
 	if (wifi_connected) {
 		if (bt_hs_on) {
 			if (bt_hs_rssi > 37) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-					  "[BTCoex], Need to decrease bt "
-					  "power for HS mode!!\n");
+				btc_alg_dbg(ALGO_TRACE_FW,
+					    "[BTCoex], Need to decrease bt power for HS mode!!\n");
 				ret = true;
 			}
 		} else {
 			if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
 			    (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-					  "[BTCoex], Need to decrease bt "
-					  "power for Wifi is connected!!\n");
+				btc_alg_dbg(ALGO_TRACE_FW,
+					    "[BTCoex], Need to decrease bt power for Wifi is connected!!\n");
 				ret = true;
 			}
 		}
@@ -653,10 +609,10 @@ static void btc8723b2ant_set_fw_dac_swing_level(struct btc_coexist *btcoexist,
 	 */
 	h2c_parameter[0] = dac_swing_lvl;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl);
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]);
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl);
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]);
 
 	btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
 }
@@ -671,9 +627,9 @@ static void btc8723b2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
 	if (dec_bt_pwr)
 		h2c_parameter[0] |= BIT1;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], decrease Bt Power : %s, FW write 0x62=0x%x\n",
-		  (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], decrease Bt Power : %s, FW write 0x62=0x%x\n",
+		    (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
 
 	btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
 }
@@ -681,15 +637,15 @@ static void btc8723b2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
 static void btc8723b2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
 				    bool force_exec, bool dec_bt_pwr)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-		  "[BTCoex], %s Dec BT power = %s\n",
-		  (force_exec ? "force to" : ""), (dec_bt_pwr ? "ON" : "OFF"));
+	btc_alg_dbg(ALGO_TRACE_FW,
+		    "[BTCoex], %s Dec BT power = %s\n",
+		    force_exec ? "force to" : "", dec_bt_pwr ? "ON" : "OFF");
 	coex_dm->cur_dec_bt_pwr = dec_bt_pwr;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], bPreDecBtPwr=%d, bCurDecBtPwr=%d\n",
-			  coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], bPreDecBtPwr=%d, bCurDecBtPwr=%d\n",
+			    coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
 
 		if (coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr)
 			return;
@@ -702,17 +658,16 @@ static void btc8723b2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
 static void btc8723b2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
 					  bool force_exec, u8 fw_dac_swing_lvl)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-		  "[BTCoex], %s set FW Dac Swing level = %d\n",
-		  (force_exec ? "force to" : ""), fw_dac_swing_lvl);
+	btc_alg_dbg(ALGO_TRACE_FW,
+		    "[BTCoex], %s set FW Dac Swing level = %d\n",
+		    (force_exec ? "force to" : ""), fw_dac_swing_lvl);
 	coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], preFwDacSwingLvl=%d, "
-			  "curFwDacSwingLvl=%d\n",
-			  coex_dm->pre_fw_dac_swing_lvl,
-			  coex_dm->cur_fw_dac_swing_lvl);
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n",
+			    coex_dm->pre_fw_dac_swing_lvl,
+			    coex_dm->cur_fw_dac_swing_lvl);
 
 		if (coex_dm->pre_fw_dac_swing_lvl ==
 		    coex_dm->cur_fw_dac_swing_lvl)
@@ -729,16 +684,16 @@ static void btc8723b2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
 {
 	if (rx_rf_shrink_on) {
 		/* Shrink RF Rx LPF corner */
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-			  "[BTCoex], Shrink RF Rx LPF corner!!\n");
+		btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+			    "[BTCoex], Shrink RF Rx LPF corner!!\n");
 		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
 					  0xfffff, 0xffffc);
 	} else {
 		/* Resume RF Rx LPF corner */
 		/* After initialized, we can use coex_dm->btRf0x1eBackup */
 		if (btcoexist->initilized) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-				  "[BTCoex], Resume RF Rx LPF corner!!\n");
+			btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+				    "[BTCoex], Resume RF Rx LPF corner!!\n");
 			btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
 						  0xfffff,
 						  coex_dm->bt_rf0x1e_backup);
@@ -749,18 +704,17 @@ static void btc8723b2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
 static void btc8723b2ant_rf_shrink(struct btc_coexist *btcoexist,
 				   bool force_exec, bool rx_rf_shrink_on)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
-		  "[BTCoex], %s turn Rx RF Shrink = %s\n",
-		  (force_exec ? "force to" : ""), (rx_rf_shrink_on ?
-		  "ON" : "OFF"));
+	btc_alg_dbg(ALGO_TRACE_SW,
+		    "[BTCoex], %s turn Rx RF Shrink = %s\n",
+		    (force_exec ? "force to" : ""), (rx_rf_shrink_on ?
+		    "ON" : "OFF"));
 	coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-			  "[BTCoex], bPreRfRxLpfShrink=%d, "
-			  "bCurRfRxLpfShrink=%d\n",
-			  coex_dm->pre_rf_rx_lpf_shrink,
-			  coex_dm->cur_rf_rx_lpf_shrink);
+		btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+			    "[BTCoex], bPreRfRxLpfShrink=%d, bCurRfRxLpfShrink=%d\n",
+			    coex_dm->pre_rf_rx_lpf_shrink,
+			    coex_dm->cur_rf_rx_lpf_shrink);
 
 		if (coex_dm->pre_rf_rx_lpf_shrink ==
 		    coex_dm->cur_rf_rx_lpf_shrink)
@@ -788,9 +742,9 @@ static void btc8723b_set_penalty_txrate(struct btc_coexist *btcoexist,
 		h2c_parameter[5] = 0xf9; /*MCS5 or OFDM36*/
 	}
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], set WiFi Low-Penalty Retry: %s",
-		  (low_penalty_ra ? "ON!!" : "OFF!!"));
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], set WiFi Low-Penalty Retry: %s",
+		    (low_penalty_ra ? "ON!!" : "OFF!!"));
 
 	btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
 }
@@ -799,18 +753,17 @@ static void btc8723b2ant_low_penalty_ra(struct btc_coexist *btcoexist,
 					bool force_exec, bool low_penalty_ra)
 {
 	/*return; */
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
-		  "[BTCoex], %s turn LowPenaltyRA = %s\n",
-		  (force_exec ? "force to" : ""), (low_penalty_ra ?
-		  "ON" : "OFF"));
+	btc_alg_dbg(ALGO_TRACE_SW,
+		    "[BTCoex], %s turn LowPenaltyRA = %s\n",
+		    (force_exec ? "force to" : ""), (low_penalty_ra ?
+		    "ON" : "OFF"));
 	coex_dm->cur_low_penalty_ra = low_penalty_ra;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-			  "[BTCoex], bPreLowPenaltyRa=%d, "
-			  "bCurLowPenaltyRa=%d\n",
-			  coex_dm->pre_low_penalty_ra,
-			  coex_dm->cur_low_penalty_ra);
+		btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+			    "[BTCoex], bPreLowPenaltyRa=%d, bCurLowPenaltyRa=%d\n",
+			    coex_dm->pre_low_penalty_ra,
+			    coex_dm->cur_low_penalty_ra);
 
 		if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra)
 			return;
@@ -824,8 +777,8 @@ static void btc8723b2ant_set_dac_swing_reg(struct btc_coexist *btcoexist,
 					       u32 level)
 {
 	u8 val = (u8) level;
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-		  "[BTCoex], Write SwDacSwing = 0x%x\n", level);
+	btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+		    "[BTCoex], Write SwDacSwing = 0x%x\n", level);
 	btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val);
 }
 
@@ -843,20 +796,20 @@ static void btc8723b2ant_dac_swing(struct btc_coexist *btcoexist,
 				   bool force_exec, bool dac_swing_on,
 				   u32 dac_swing_lvl)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
-		  "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl=0x%x\n",
-		  (force_exec ? "force to" : ""),
-		  (dac_swing_on ? "ON" : "OFF"), dac_swing_lvl);
+	btc_alg_dbg(ALGO_TRACE_SW,
+		    "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl=0x%x\n",
+		    (force_exec ? "force to" : ""),
+		    (dac_swing_on ? "ON" : "OFF"), dac_swing_lvl);
 	coex_dm->cur_dac_swing_on = dac_swing_on;
 	coex_dm->cur_dac_swing_lvl = dac_swing_lvl;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-			  "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x,"
-			  " bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n",
-			  coex_dm->pre_dac_swing_on, coex_dm->pre_dac_swing_lvl,
-			  coex_dm->cur_dac_swing_on,
-			  coex_dm->cur_dac_swing_lvl);
+		btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+			    "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x, bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n",
+			    coex_dm->pre_dac_swing_on,
+			    coex_dm->pre_dac_swing_lvl,
+			    coex_dm->cur_dac_swing_on,
+			    coex_dm->cur_dac_swing_lvl);
 
 		if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
 		    (coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl))
@@ -877,8 +830,8 @@ static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist,
 
 	/* BB AGC Gain Table */
 	if (agc_table_en) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-			  "[BTCoex], BB Agc Table On!\n");
+		btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+			    "[BTCoex], BB Agc Table On!\n");
 		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6e1A0001);
 		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6d1B0001);
 		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6c1C0001);
@@ -887,8 +840,8 @@ static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist,
 		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x691F0001);
 		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x68200001);
 	} else {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-			  "[BTCoex], BB Agc Table Off!\n");
+		btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+			    "[BTCoex], BB Agc Table Off!\n");
 		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xaa1A0001);
 		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa91B0001);
 		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa81C0001);
@@ -901,15 +854,15 @@ static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist,
 	/* RF Gain */
 	btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x02000);
 	if (agc_table_en) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-			  "[BTCoex], Agc Table On!\n");
+		btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+			    "[BTCoex], Agc Table On!\n");
 		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
 					  0xfffff, 0x38fff);
 		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
 					  0xfffff, 0x38ffe);
 	} else {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-			  "[BTCoex], Agc Table Off!\n");
+		btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+			    "[BTCoex], Agc Table Off!\n");
 		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
 					  0xfffff, 0x380c3);
 		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
@@ -920,15 +873,15 @@ static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist,
 	btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xed, 0xfffff, 0x1);
 
 	if (agc_table_en) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-			  "[BTCoex], Agc Table On!\n");
+		btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+			    "[BTCoex], Agc Table On!\n");
 		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
 					  0xfffff, 0x38fff);
 		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
 					  0xfffff, 0x38ffe);
 	} else {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-			  "[BTCoex], Agc Table Off!\n");
+		btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+			    "[BTCoex], Agc Table Off!\n");
 		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
 					  0xfffff, 0x380c3);
 		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
@@ -946,16 +899,17 @@ static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist,
 static void btc8723b2ant_agc_table(struct btc_coexist *btcoexist,
 				   bool force_exec, bool agc_table_en)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
-		  "[BTCoex], %s %s Agc Table\n",
-		  (force_exec ? "force to" : ""),
-		  (agc_table_en ? "Enable" : "Disable"));
+	btc_alg_dbg(ALGO_TRACE_SW,
+		    "[BTCoex], %s %s Agc Table\n",
+		    (force_exec ? "force to" : ""),
+		    (agc_table_en ? "Enable" : "Disable"));
 	coex_dm->cur_agc_table_en = agc_table_en;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-			  "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
-			  coex_dm->pre_agc_table_en, coex_dm->cur_agc_table_en);
+		btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+			    "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
+			    coex_dm->pre_agc_table_en,
+			    coex_dm->cur_agc_table_en);
 
 		if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en)
 			return;
@@ -969,20 +923,20 @@ static void btc8723b2ant_set_coex_table(struct btc_coexist *btcoexist,
 				       u32 val0x6c0, u32 val0x6c4,
 				       u32 val0x6c8, u8 val0x6cc)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-		  "[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0);
+	btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+		    "[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0);
 	btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-		  "[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4);
+	btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+		    "[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4);
 	btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-		  "[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8);
+	btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+		    "[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8);
 	btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-		  "[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc);
+	btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+		    "[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc);
 	btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
 }
 
@@ -991,29 +945,24 @@ static void btc8723b2ant_coex_table(struct btc_coexist *btcoexist,
 				    u32 val0x6c4, u32 val0x6c8,
 				    u8 val0x6cc)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
-		  "[BTCoex], %s write Coex Table 0x6c0=0x%x,"
-		  " 0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n",
-		  (force_exec ? "force to" : ""), val0x6c0,
-		  val0x6c4, val0x6c8, val0x6cc);
+	btc_alg_dbg(ALGO_TRACE_SW,
+		    "[BTCoex], %s write Coex Table 0x6c0=0x%x, 0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n",
+		    force_exec ? "force to" : "",
+		    val0x6c0, val0x6c4, val0x6c8, val0x6cc);
 	coex_dm->cur_val0x6c0 = val0x6c0;
 	coex_dm->cur_val0x6c4 = val0x6c4;
 	coex_dm->cur_val0x6c8 = val0x6c8;
 	coex_dm->cur_val0x6cc = val0x6cc;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-			  "[BTCoex], preVal0x6c0=0x%x, "
-			  "preVal0x6c4=0x%x, preVal0x6c8=0x%x, "
-			  "preVal0x6cc=0x%x !!\n",
-			  coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4,
-			  coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-			  "[BTCoex], curVal0x6c0=0x%x, "
-			  "curVal0x6c4=0x%x, curVal0x6c8=0x%x, "
-			  "curVal0x6cc=0x%x !!\n",
-			  coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4,
-			  coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
+		btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+			    "[BTCoex], preVal0x6c0=0x%x, preVal0x6c4=0x%x, preVal0x6c8=0x%x, preVal0x6cc=0x%x !!\n",
+			    coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4,
+			    coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
+		btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+			    "[BTCoex], curVal0x6c0=0x%x, curVal0x6c4=0x%x, curVal0x6c8=0x%x, curVal0x6cc=0x%x !!\n",
+			    coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4,
+			    coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
 
 		if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
 		    (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
@@ -1099,9 +1048,9 @@ static void btc8723b2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
 	if (enable)
 		h2c_parameter[0] |= BIT0;/* function enable*/
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], set FW for BT Ignore Wlan_Act, "
-		  "FW write 0x63=0x%x\n", h2c_parameter[0]);
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63=0x%x\n",
+		    h2c_parameter[0]);
 
 	btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
 }
@@ -1109,17 +1058,16 @@ static void btc8723b2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
1109static void btc8723b2ant_ignore_wlan_act(struct btc_coexist *btcoexist, 1058static void btc8723b2ant_ignore_wlan_act(struct btc_coexist *btcoexist,
1110 bool force_exec, bool enable) 1059 bool force_exec, bool enable)
1111{ 1060{
1112 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, 1061 btc_alg_dbg(ALGO_TRACE_FW,
1113 "[BTCoex], %s turn Ignore WlanAct %s\n", 1062 "[BTCoex], %s turn Ignore WlanAct %s\n",
1114 (force_exec ? "force to" : ""), (enable ? "ON" : "OFF")); 1063 (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
1115 coex_dm->cur_ignore_wlan_act = enable; 1064 coex_dm->cur_ignore_wlan_act = enable;
1116 1065
1117 if (!force_exec) { 1066 if (!force_exec) {
1118 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 1067 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
1119 "[BTCoex], bPreIgnoreWlanAct = %d, " 1068 "[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n",
1120 "bCurIgnoreWlanAct = %d!!\n", 1069 coex_dm->pre_ignore_wlan_act,
1121 coex_dm->pre_ignore_wlan_act, 1070 coex_dm->cur_ignore_wlan_act);
1122 coex_dm->cur_ignore_wlan_act);
1123 1071
1124 if (coex_dm->pre_ignore_wlan_act == 1072 if (coex_dm->pre_ignore_wlan_act ==
1125 coex_dm->cur_ignore_wlan_act) 1073 coex_dm->cur_ignore_wlan_act)
@@ -1147,11 +1095,11 @@ static void btc8723b2ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, u8 byte1,
1147 coex_dm->ps_tdma_para[3] = byte4; 1095 coex_dm->ps_tdma_para[3] = byte4;
1148 coex_dm->ps_tdma_para[4] = byte5; 1096 coex_dm->ps_tdma_para[4] = byte5;
1149 1097
1150 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, 1098 btc_alg_dbg(ALGO_TRACE_FW_EXEC,
1151 "[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n", 1099 "[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n",
1152 h2c_parameter[0], 1100 h2c_parameter[0],
1153 h2c_parameter[1] << 24 | h2c_parameter[2] << 16 | 1101 h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
1154 h2c_parameter[3] << 8 | h2c_parameter[4]); 1102 h2c_parameter[3] << 8 | h2c_parameter[4]);
1155 1103
1156 btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter); 1104 btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
1157} 1105}
@@ -1203,7 +1151,6 @@ static void btc8723b2ant_set_ant_path(struct btc_coexist *btcoexist,
1203 1151
1204 /* Force GNT_BT to low */ 1152 /* Force GNT_BT to low */
1205 btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x0); 1153 btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x0);
1206 btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
1207 1154
1208 if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) { 1155 if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) {
1209 /* tell firmware "no antenna inverse" */ 1156 /* tell firmware "no antenna inverse" */
@@ -1211,19 +1158,25 @@ static void btc8723b2ant_set_ant_path(struct btc_coexist *btcoexist,
1211 h2c_parameter[1] = 1; /* ext switch type */ 1158 h2c_parameter[1] = 1; /* ext switch type */
1212 btcoexist->btc_fill_h2c(btcoexist, 0x65, 2, 1159 btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
1213 h2c_parameter); 1160 h2c_parameter);
1161 btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
1214 } else { 1162 } else {
1215 /* tell firmware "antenna inverse" */ 1163 /* tell firmware "antenna inverse" */
1216 h2c_parameter[0] = 1; 1164 h2c_parameter[0] = 1;
1217 h2c_parameter[1] = 1; /* ext switch type */ 1165 h2c_parameter[1] = 1; /* ext switch type */
1218 btcoexist->btc_fill_h2c(btcoexist, 0x65, 2, 1166 btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
1219 h2c_parameter); 1167 h2c_parameter);
1168 btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280);
1220 } 1169 }
1221 } 1170 }
1222 1171
1223 /* ext switch setting */ 1172 /* ext switch setting */
1224 if (use_ext_switch) { 1173 if (use_ext_switch) {
1225 /* fixed internal switch S1->WiFi, S0->BT */ 1174 /* fixed internal switch S1->WiFi, S0->BT */
1226 btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0); 1175 if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT)
1176 btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
1177 else
1178 btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280);
1179
1227 switch (antpos_type) { 1180 switch (antpos_type) {
1228 case BTC_ANT_WIFI_AT_MAIN: 1181 case BTC_ANT_WIFI_AT_MAIN:
1229 /* ext switch main at wifi */ 1182 /* ext switch main at wifi */
@@ -1255,20 +1208,20 @@ static void btc8723b2ant_set_ant_path(struct btc_coexist *btcoexist,
1255static void btc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec, 1208static void btc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec,
1256 bool turn_on, u8 type) 1209 bool turn_on, u8 type)
1257{ 1210{
1258 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, 1211 btc_alg_dbg(ALGO_TRACE_FW,
1259 "[BTCoex], %s turn %s PS TDMA, type=%d\n", 1212 "[BTCoex], %s turn %s PS TDMA, type=%d\n",
1260 (force_exec ? "force to" : ""), 1213 (force_exec ? "force to" : ""),
1261 (turn_on ? "ON" : "OFF"), type); 1214 (turn_on ? "ON" : "OFF"), type);
1262 coex_dm->cur_ps_tdma_on = turn_on; 1215 coex_dm->cur_ps_tdma_on = turn_on;
1263 coex_dm->cur_ps_tdma = type; 1216 coex_dm->cur_ps_tdma = type;
1264 1217
1265 if (!force_exec) { 1218 if (!force_exec) {
1266 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 1219 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
1267 "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n", 1220 "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
1268 coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on); 1221 coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
1269 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 1222 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
1270 "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n", 1223 "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
1271 coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma); 1224 coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
1272 1225
1273 if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) && 1226 if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
1274 (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma)) 1227 (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1466,8 +1419,8 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
1466 btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, 1419 btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
1467 &low_pwr_disable); 1420 &low_pwr_disable);
1468 1421
1469 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1422 btc_alg_dbg(ALGO_TRACE,
1470 "[BTCoex], Wifi non-connected idle!!\n"); 1423 "[BTCoex], Wifi non-connected idle!!\n");
1471 1424
1472 btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 1425 btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff,
1473 0x0); 1426 0x0);
@@ -1490,9 +1443,8 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
1490 BTC_SET_ACT_DISABLE_LOW_POWER, 1443 BTC_SET_ACT_DISABLE_LOW_POWER,
1491 &low_pwr_disable); 1444 &low_pwr_disable);
1492 1445
1493 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1446 btc_alg_dbg(ALGO_TRACE,
1494 "[BTCoex], Wifi connected + " 1447 "[BTCoex], Wifi connected + BT non connected-idle!!\n");
1495 "BT non connected-idle!!\n");
1496 1448
1497 btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 1449 btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
1498 0xfffff, 0x0); 1450 0xfffff, 0x0);
@@ -1518,9 +1470,8 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
1518 1470
1519 if (bt_hs_on) 1471 if (bt_hs_on)
1520 return false; 1472 return false;
1521 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1473 btc_alg_dbg(ALGO_TRACE,
1522 "[BTCoex], Wifi connected + " 1474 "[BTCoex], Wifi connected + BT connected-idle!!\n");
1523 "BT connected-idle!!\n");
1524 1475
1525 btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 1476 btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
1526 0xfffff, 0x0); 1477 0xfffff, 0x0);
@@ -1544,17 +1495,15 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
1544 &low_pwr_disable); 1495 &low_pwr_disable);
1545 1496
1546 if (wifi_busy) { 1497 if (wifi_busy) {
1547 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1498 btc_alg_dbg(ALGO_TRACE,
1548 "[BTCoex], Wifi Connected-Busy + " 1499 "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
1549 "BT Busy!!\n");
1550 common = false; 1500 common = false;
1551 } else { 1501 } else {
1552 if (bt_hs_on) 1502 if (bt_hs_on)
1553 return false; 1503 return false;
1554 1504
1555 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1505 btc_alg_dbg(ALGO_TRACE,
1556 "[BTCoex], Wifi Connected-Idle + " 1506 "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
1557 "BT Busy!!\n");
1558 1507
1559 btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 1508 btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A,
1560 0x1, 0xfffff, 0x0); 1509 0x1, 0xfffff, 0x0);
@@ -1592,9 +1541,8 @@ static void set_tdma_int1(struct btc_coexist *btcoexist, bool tx_pause,
1592{ 1541{
1593 /* Set PS TDMA for max interval == 1 */ 1542 /* Set PS TDMA for max interval == 1 */
1594 if (tx_pause) { 1543 if (tx_pause) {
1595 BTC_PRINT(BTC_MSG_ALGORITHM, 1544 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
1596 ALGO_TRACE_FW_DETAIL, 1545 "[BTCoex], TxPause = 1\n");
1597 "[BTCoex], TxPause = 1\n");
1598 1546
1599 if (coex_dm->cur_ps_tdma == 71) { 1547 if (coex_dm->cur_ps_tdma == 71) {
1600 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, 1548 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
@@ -1690,9 +1638,8 @@ static void set_tdma_int1(struct btc_coexist *btcoexist, bool tx_pause,
1690 } 1638 }
1691 } 1639 }
1692 } else { 1640 } else {
1693 BTC_PRINT(BTC_MSG_ALGORITHM, 1641 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
1694 ALGO_TRACE_FW_DETAIL, 1642 "[BTCoex], TxPause = 0\n");
1695 "[BTCoex], TxPause = 0\n");
1696 if (coex_dm->cur_ps_tdma == 5) { 1643 if (coex_dm->cur_ps_tdma == 5) {
1697 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 71); 1644 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 71);
1698 coex_dm->tdma_adj_type = 71; 1645 coex_dm->tdma_adj_type = 71;
@@ -1790,9 +1737,8 @@ static void set_tdma_int2(struct btc_coexist *btcoexist, bool tx_pause,
1790{ 1737{
1791 /* Set PS TDMA for max interval == 2 */ 1738 /* Set PS TDMA for max interval == 2 */
1792 if (tx_pause) { 1739 if (tx_pause) {
1793 BTC_PRINT(BTC_MSG_ALGORITHM, 1740 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
1794 ALGO_TRACE_FW_DETAIL, 1741 "[BTCoex], TxPause = 1\n");
1795 "[BTCoex], TxPause = 1\n");
1796 if (coex_dm->cur_ps_tdma == 1) { 1742 if (coex_dm->cur_ps_tdma == 1) {
1797 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6); 1743 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6);
1798 coex_dm->tdma_adj_type = 6; 1744 coex_dm->tdma_adj_type = 6;
@@ -1873,9 +1819,8 @@ static void set_tdma_int2(struct btc_coexist *btcoexist, bool tx_pause,
1873 } 1819 }
1874 } 1820 }
1875 } else { 1821 } else {
1876 BTC_PRINT(BTC_MSG_ALGORITHM, 1822 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
1877 ALGO_TRACE_FW_DETAIL, 1823 "[BTCoex], TxPause = 0\n");
1878 "[BTCoex], TxPause = 0\n");
1879 if (coex_dm->cur_ps_tdma == 5) { 1824 if (coex_dm->cur_ps_tdma == 5) {
1880 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2); 1825 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
1881 coex_dm->tdma_adj_type = 2; 1826 coex_dm->tdma_adj_type = 2;
@@ -1963,9 +1908,8 @@ static void set_tdma_int3(struct btc_coexist *btcoexist, bool tx_pause,
1963{ 1908{
1964 /* Set PS TDMA for max interval == 3 */ 1909 /* Set PS TDMA for max interval == 3 */
1965 if (tx_pause) { 1910 if (tx_pause) {
1966 BTC_PRINT(BTC_MSG_ALGORITHM, 1911 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
1967 ALGO_TRACE_FW_DETAIL, 1912 "[BTCoex], TxPause = 1\n");
1968 "[BTCoex], TxPause = 1\n");
1969 if (coex_dm->cur_ps_tdma == 1) { 1913 if (coex_dm->cur_ps_tdma == 1) {
1970 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7); 1914 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7);
1971 coex_dm->tdma_adj_type = 7; 1915 coex_dm->tdma_adj_type = 7;
@@ -2046,9 +1990,8 @@ static void set_tdma_int3(struct btc_coexist *btcoexist, bool tx_pause,
2046 } 1990 }
2047 } 1991 }
2048 } else { 1992 } else {
2049 BTC_PRINT(BTC_MSG_ALGORITHM, 1993 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
2050 ALGO_TRACE_FW_DETAIL, 1994 "[BTCoex], TxPause = 0\n");
2051 "[BTCoex], TxPause = 0\n");
2052 if (coex_dm->cur_ps_tdma == 5) { 1995 if (coex_dm->cur_ps_tdma == 5) {
2053 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3); 1996 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
2054 coex_dm->tdma_adj_type = 3; 1997 coex_dm->tdma_adj_type = 3;
@@ -2140,13 +2083,13 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
2140 s32 result; 2083 s32 result;
2141 u8 retry_count = 0; 2084 u8 retry_count = 0;
2142 2085
2143 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, 2086 btc_alg_dbg(ALGO_TRACE_FW,
2144 "[BTCoex], TdmaDurationAdjust()\n"); 2087 "[BTCoex], TdmaDurationAdjust()\n");
2145 2088
2146 if (!coex_dm->auto_tdma_adjust) { 2089 if (!coex_dm->auto_tdma_adjust) {
2147 coex_dm->auto_tdma_adjust = true; 2090 coex_dm->auto_tdma_adjust = true;
2148 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 2091 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
2149 "[BTCoex], first run TdmaDurationAdjust()!!\n"); 2092 "[BTCoex], first run TdmaDurationAdjust()!!\n");
2150 if (sco_hid) { 2093 if (sco_hid) {
2151 if (tx_pause) { 2094 if (tx_pause) {
2152 if (max_interval == 1) { 2095 if (max_interval == 1) {
@@ -2250,11 +2193,11 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
2250 } else { 2193 } else {
2251 /*acquire the BT TRx retry count from BT_Info byte2*/ 2194 /*acquire the BT TRx retry count from BT_Info byte2*/
2252 retry_count = coex_sta->bt_retry_cnt; 2195 retry_count = coex_sta->bt_retry_cnt;
2253 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 2196 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
2254 "[BTCoex], retry_count = %d\n", retry_count); 2197 "[BTCoex], retry_count = %d\n", retry_count);
2255 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 2198 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
2256 "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_count=%d\n", 2199 "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_count=%d\n",
2257 up, dn, m, n, wait_count); 2200 up, dn, m, n, wait_count);
2258 result = 0; 2201 result = 0;
2259 wait_count++; 2202 wait_count++;
2260 /* no retry in the last 2-second duration*/ 2203 /* no retry in the last 2-second duration*/
@@ -2271,10 +2214,8 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
2271 up = 0; 2214 up = 0;
2272 dn = 0; 2215 dn = 0;
2273 result = 1; 2216 result = 1;
2274 BTC_PRINT(BTC_MSG_ALGORITHM, 2217 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
2275 ALGO_TRACE_FW_DETAIL, 2218 "[BTCoex], Increase wifi duration!!\n");
2276 "[BTCoex], Increase wifi "
2277 "duration!!\n");
2278 } /* <=3 retry in the last 2-second duration*/ 2219 } /* <=3 retry in the last 2-second duration*/
2279 } else if (retry_count <= 3) { 2220 } else if (retry_count <= 3) {
2280 up--; 2221 up--;
@@ -2297,10 +2238,8 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
2297 dn = 0; 2238 dn = 0;
2298 wait_count = 0; 2239 wait_count = 0;
2299 result = -1; 2240 result = -1;
2300 BTC_PRINT(BTC_MSG_ALGORITHM, 2241 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
2301 ALGO_TRACE_FW_DETAIL, 2242 "[BTCoex], Decrease wifi duration for retry_counter<3!!\n");
2302 "[BTCoex], Decrease wifi duration "
2303 "for retry_counter<3!!\n");
2304 } 2243 }
2305 } else { 2244 } else {
2306 if (wait_count == 1) 2245 if (wait_count == 1)
@@ -2316,13 +2255,12 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
2316 dn = 0; 2255 dn = 0;
2317 wait_count = 0; 2256 wait_count = 0;
2318 result = -1; 2257 result = -1;
2319 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 2258 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
2320 "[BTCoex], Decrease wifi duration " 2259 "[BTCoex], Decrease wifi duration for retry_counter>3!!\n");
2321 "for retry_counter>3!!\n");
2322 } 2260 }
2323 2261
2324 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 2262 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
2325 "[BTCoex], max Interval = %d\n", max_interval); 2263 "[BTCoex], max Interval = %d\n", max_interval);
2326 if (max_interval == 1) 2264 if (max_interval == 1)
2327 set_tdma_int1(btcoexist, tx_pause, result); 2265 set_tdma_int1(btcoexist, tx_pause, result);
2328 else if (max_interval == 2) 2266 else if (max_interval == 2)
@@ -2336,10 +2274,9 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
2336 */ 2274 */
2337 if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) { 2275 if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) {
2338 bool scan = false, link = false, roam = false; 2276 bool scan = false, link = false, roam = false;
2339 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 2277 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
2340 "[BTCoex], PsTdma type dismatch!!!, " 2278 "[BTCoex], PsTdma type dismatch!!!, curPsTdma=%d, recordPsTdma=%d\n",
2341 "curPsTdma=%d, recordPsTdma=%d\n", 2279 coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
2342 coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
2343 2280
2344 btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); 2281 btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
2345 btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); 2282 btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2349,9 +2286,8 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
2349 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2286 btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
2350 coex_dm->tdma_adj_type); 2287 coex_dm->tdma_adj_type);
2351 else 2288 else
2352 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 2289 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
2353 "[BTCoex], roaming/link/scan is under" 2290 "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
2354 " progress, will adjust next time!!!\n");
2355 } 2291 }
2356} 2292}
2357 2293
@@ -2989,27 +2925,26 @@ static void btc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
2989{ 2925{
2990 u8 algorithm = 0; 2926 u8 algorithm = 0;
2991 2927
2992 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2928 btc_alg_dbg(ALGO_TRACE,
2993 "[BTCoex], RunCoexistMechanism()===>\n"); 2929 "[BTCoex], RunCoexistMechanism()===>\n");
2994 2930
2995 if (btcoexist->manual_control) { 2931 if (btcoexist->manual_control) {
2996 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2932 btc_alg_dbg(ALGO_TRACE,
2997 "[BTCoex], RunCoexistMechanism(), " 2933 "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
2998 "return for Manual CTRL <===\n");
2999 return; 2934 return;
3000 } 2935 }
3001 2936
3002 if (coex_sta->under_ips) { 2937 if (coex_sta->under_ips) {
3003 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2938 btc_alg_dbg(ALGO_TRACE,
3004 "[BTCoex], wifi is under IPS !!!\n"); 2939 "[BTCoex], wifi is under IPS !!!\n");
3005 return; 2940 return;
3006 } 2941 }
3007 2942
3008 algorithm = btc8723b2ant_action_algorithm(btcoexist); 2943 algorithm = btc8723b2ant_action_algorithm(btcoexist);
3009 if (coex_sta->c2h_bt_inquiry_page && 2944 if (coex_sta->c2h_bt_inquiry_page &&
3010 (BT_8723B_2ANT_COEX_ALGO_PANHS != algorithm)) { 2945 (BT_8723B_2ANT_COEX_ALGO_PANHS != algorithm)) {
3011 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2946 btc_alg_dbg(ALGO_TRACE,
3012 "[BTCoex], BT is under inquiry/page scan !!\n"); 2947 "[BTCoex], BT is under inquiry/page scan !!\n");
3013 btc8723b2ant_action_bt_inquiry(btcoexist); 2948 btc8723b2ant_action_bt_inquiry(btcoexist);
3014 return; 2949 return;
3015 } else { 2950 } else {
@@ -3021,84 +2956,75 @@ static void btc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
3021 } 2956 }
3022 2957
3023 coex_dm->cur_algorithm = algorithm; 2958 coex_dm->cur_algorithm = algorithm;
3024 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, "[BTCoex], Algorithm = %d\n", 2959 btc_alg_dbg(ALGO_TRACE, "[BTCoex], Algorithm = %d\n",
3025 coex_dm->cur_algorithm); 2960 coex_dm->cur_algorithm);
3026 2961
3027 if (btc8723b2ant_is_common_action(btcoexist)) { 2962 if (btc8723b2ant_is_common_action(btcoexist)) {
3028 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2963 btc_alg_dbg(ALGO_TRACE,
3029 "[BTCoex], Action 2-Ant common.\n"); 2964 "[BTCoex], Action 2-Ant common\n");
3030 coex_dm->auto_tdma_adjust = false; 2965 coex_dm->auto_tdma_adjust = false;
3031 } else { 2966 } else {
3032 if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) { 2967 if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) {
3033 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2968 btc_alg_dbg(ALGO_TRACE,
3034 "[BTCoex], preAlgorithm=%d, " 2969 "[BTCoex], preAlgorithm=%d, curAlgorithm=%d\n",
3035 "curAlgorithm=%d\n", coex_dm->pre_algorithm, 2970 coex_dm->pre_algorithm,
3036 coex_dm->cur_algorithm); 2971 coex_dm->cur_algorithm);
3037 coex_dm->auto_tdma_adjust = false; 2972 coex_dm->auto_tdma_adjust = false;
3038 } 2973 }
3039 switch (coex_dm->cur_algorithm) { 2974 switch (coex_dm->cur_algorithm) {
3040 case BT_8723B_2ANT_COEX_ALGO_SCO: 2975 case BT_8723B_2ANT_COEX_ALGO_SCO:
3041 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2976 btc_alg_dbg(ALGO_TRACE,
3042 "[BTCoex], Action 2-Ant, algorithm = SCO.\n"); 2977 "[BTCoex], Action 2-Ant, algorithm = SCO\n");
3043 btc8723b2ant_action_sco(btcoexist); 2978 btc8723b2ant_action_sco(btcoexist);
3044 break; 2979 break;
3045 case BT_8723B_2ANT_COEX_ALGO_HID: 2980 case BT_8723B_2ANT_COEX_ALGO_HID:
3046 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2981 btc_alg_dbg(ALGO_TRACE,
3047 "[BTCoex], Action 2-Ant, algorithm = HID.\n"); 2982 "[BTCoex], Action 2-Ant, algorithm = HID\n");
3048 btc8723b2ant_action_hid(btcoexist); 2983 btc8723b2ant_action_hid(btcoexist);
3049 break; 2984 break;
3050 case BT_8723B_2ANT_COEX_ALGO_A2DP: 2985 case BT_8723B_2ANT_COEX_ALGO_A2DP:
3051 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2986 btc_alg_dbg(ALGO_TRACE,
3052 "[BTCoex], Action 2-Ant, " 2987 "[BTCoex], Action 2-Ant, algorithm = A2DP\n");
3053 "algorithm = A2DP.\n");
3054 btc8723b2ant_action_a2dp(btcoexist); 2988 btc8723b2ant_action_a2dp(btcoexist);
3055 break; 2989 break;
3056 case BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS: 2990 case BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS:
3057 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2991 btc_alg_dbg(ALGO_TRACE,
3058 "[BTCoex], Action 2-Ant, " 2992 "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
3059 "algorithm = A2DP+PAN(HS).\n");
3060 btc8723b2ant_action_a2dp_pan_hs(btcoexist); 2993 btc8723b2ant_action_a2dp_pan_hs(btcoexist);
3061 break; 2994 break;
3062 case BT_8723B_2ANT_COEX_ALGO_PANEDR: 2995 case BT_8723B_2ANT_COEX_ALGO_PANEDR:
3063 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2996 btc_alg_dbg(ALGO_TRACE,
3064 "[BTCoex], Action 2-Ant, " 2997 "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)\n");
3065 "algorithm = PAN(EDR).\n");
3066 btc8723b2ant_action_pan_edr(btcoexist); 2998 btc8723b2ant_action_pan_edr(btcoexist);
3067 break; 2999 break;
3068 case BT_8723B_2ANT_COEX_ALGO_PANHS: 3000 case BT_8723B_2ANT_COEX_ALGO_PANHS:
3069 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3001 btc_alg_dbg(ALGO_TRACE,
3070 "[BTCoex], Action 2-Ant, " 3002 "[BTCoex], Action 2-Ant, algorithm = HS mode\n");
3071 "algorithm = HS mode.\n");
3072 btc8723b2ant_action_pan_hs(btcoexist); 3003 btc8723b2ant_action_pan_hs(btcoexist);
3073 break; 3004 break;
3074 case BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP: 3005 case BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP:
3075 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3006 btc_alg_dbg(ALGO_TRACE,
3076 "[BTCoex], Action 2-Ant, " 3007 "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP\n");
3077 "algorithm = PAN+A2DP.\n");
3078 btc8723b2ant_action_pan_edr_a2dp(btcoexist); 3008 btc8723b2ant_action_pan_edr_a2dp(btcoexist);
3079 break; 3009 break;
3080 case BT_8723B_2ANT_COEX_ALGO_PANEDR_HID: 3010 case BT_8723B_2ANT_COEX_ALGO_PANEDR_HID:
3081 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3011 btc_alg_dbg(ALGO_TRACE,
3082 "[BTCoex], Action 2-Ant, " 3012 "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID\n");
3083 "algorithm = PAN(EDR)+HID.\n");
3084 btc8723b2ant_action_pan_edr_hid(btcoexist); 3013 btc8723b2ant_action_pan_edr_hid(btcoexist);
3085 break; 3014 break;
3086 case BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR: 3015 case BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
3087 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3016 btc_alg_dbg(ALGO_TRACE,
3088 "[BTCoex], Action 2-Ant, " 3017 "[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN\n");
3089 "algorithm = HID+A2DP+PAN.\n");
3090 btc8723b2ant_action_hid_a2dp_pan_edr(btcoexist); 3018 btc8723b2ant_action_hid_a2dp_pan_edr(btcoexist);
3091 break; 3019 break;
3092 case BT_8723B_2ANT_COEX_ALGO_HID_A2DP: 3020 case BT_8723B_2ANT_COEX_ALGO_HID_A2DP:
3093 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3021 btc_alg_dbg(ALGO_TRACE,
3094 "[BTCoex], Action 2-Ant, " 3022 "[BTCoex], Action 2-Ant, algorithm = HID+A2DP\n");
3095 "algorithm = HID+A2DP.\n");
3096 btc8723b2ant_action_hid_a2dp(btcoexist); 3023 btc8723b2ant_action_hid_a2dp(btcoexist);
3097 break; 3024 break;
3098 default: 3025 default:
3099 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3026 btc_alg_dbg(ALGO_TRACE,
3100 "[BTCoex], Action 2-Ant, " 3027 "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n");
3101 "algorithm = coexist All Off!!\n");
3102 btc8723b2ant_coex_alloff(btcoexist); 3028 btc8723b2ant_coex_alloff(btcoexist);
3103 break; 3029 break;
3104 } 3030 }
@@ -3126,8 +3052,8 @@ void ex_btc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist)
3126{ 3052{
3127 u8 u8tmp = 0; 3053 u8 u8tmp = 0;
3128 3054
3129 BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, 3055 btc_iface_dbg(INTF_INIT,
3130 "[BTCoex], 2Ant Init HW Config!!\n"); 3056 "[BTCoex], 2Ant Init HW Config!!\n");
3131 coex_dm->bt_rf0x1e_backup = 3057 coex_dm->bt_rf0x1e_backup =
3132 btcoexist->btc_get_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff); 3058 btcoexist->btc_get_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff);
3133 3059
@@ -3152,8 +3078,8 @@ void ex_btc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist)
3152 3078
3153void ex_btc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist) 3079void ex_btc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist)
3154{ 3080{
3155 BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, 3081 btc_iface_dbg(INTF_INIT,
3156 "[BTCoex], Coex Mechanism Init!!\n"); 3082 "[BTCoex], Coex Mechanism Init!!\n");
3157 btc8723b2ant_init_coex_dm(btcoexist); 3083 btc8723b2ant_init_coex_dm(btcoexist);
3158} 3084}
3159 3085
@@ -3388,15 +3314,15 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist)
3388void ex_btc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type) 3314void ex_btc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
3389{ 3315{
3390 if (BTC_IPS_ENTER == type) { 3316 if (BTC_IPS_ENTER == type) {
3391 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3317 btc_iface_dbg(INTF_NOTIFY,
3392 "[BTCoex], IPS ENTER notify\n"); 3318 "[BTCoex], IPS ENTER notify\n");
3393 coex_sta->under_ips = true; 3319 coex_sta->under_ips = true;
3394 btc8723b2ant_wifioff_hwcfg(btcoexist); 3320 btc8723b2ant_wifioff_hwcfg(btcoexist);
3395 btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true); 3321 btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
3396 btc8723b2ant_coex_alloff(btcoexist); 3322 btc8723b2ant_coex_alloff(btcoexist);
3397 } else if (BTC_IPS_LEAVE == type) { 3323 } else if (BTC_IPS_LEAVE == type) {
3398 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3324 btc_iface_dbg(INTF_NOTIFY,
3399 "[BTCoex], IPS LEAVE notify\n"); 3325 "[BTCoex], IPS LEAVE notify\n");
3400 coex_sta->under_ips = false; 3326 coex_sta->under_ips = false;
3401 ex_btc8723b2ant_init_hwconfig(btcoexist); 3327 ex_btc8723b2ant_init_hwconfig(btcoexist);
3402 btc8723b2ant_init_coex_dm(btcoexist); 3328 btc8723b2ant_init_coex_dm(btcoexist);
@@ -3407,12 +3333,12 @@ void ex_btc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
3407void ex_btc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type) 3333void ex_btc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
3408{ 3334{
3409 if (BTC_LPS_ENABLE == type) { 3335 if (BTC_LPS_ENABLE == type) {
3410 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3336 btc_iface_dbg(INTF_NOTIFY,
3411 "[BTCoex], LPS ENABLE notify\n"); 3337 "[BTCoex], LPS ENABLE notify\n");
3412 coex_sta->under_lps = true; 3338 coex_sta->under_lps = true;
3413 } else if (BTC_LPS_DISABLE == type) { 3339 } else if (BTC_LPS_DISABLE == type) {
3414 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3340 btc_iface_dbg(INTF_NOTIFY,
3415 "[BTCoex], LPS DISABLE notify\n"); 3341 "[BTCoex], LPS DISABLE notify\n");
3416 coex_sta->under_lps = false; 3342 coex_sta->under_lps = false;
3417 } 3343 }
3418} 3344}
@@ -3420,21 +3346,21 @@ void ex_btc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
3420void ex_btc8723b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type) 3346void ex_btc8723b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
3421{ 3347{
3422 if (BTC_SCAN_START == type) 3348 if (BTC_SCAN_START == type)
3423 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3349 btc_iface_dbg(INTF_NOTIFY,
3424 "[BTCoex], SCAN START notify\n"); 3350 "[BTCoex], SCAN START notify\n");
3425 else if (BTC_SCAN_FINISH == type) 3351 else if (BTC_SCAN_FINISH == type)
3426 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3352 btc_iface_dbg(INTF_NOTIFY,
3427 "[BTCoex], SCAN FINISH notify\n"); 3353 "[BTCoex], SCAN FINISH notify\n");
3428} 3354}
3429 3355
3430void ex_btc8723b2ant_connect_notify(struct btc_coexist *btcoexist, u8 type) 3356void ex_btc8723b2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
3431{ 3357{
3432 if (BTC_ASSOCIATE_START == type) 3358 if (BTC_ASSOCIATE_START == type)
3433 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3359 btc_iface_dbg(INTF_NOTIFY,
3434 "[BTCoex], CONNECT START notify\n"); 3360 "[BTCoex], CONNECT START notify\n");
3435 else if (BTC_ASSOCIATE_FINISH == type) 3361 else if (BTC_ASSOCIATE_FINISH == type)
3436 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3362 btc_iface_dbg(INTF_NOTIFY,
3437 "[BTCoex], CONNECT FINISH notify\n"); 3363 "[BTCoex], CONNECT FINISH notify\n");
3438} 3364}
3439 3365
3440void ex_btc8723b2ant_media_status_notify(struct btc_coexist *btcoexist, 3366void ex_btc8723b2ant_media_status_notify(struct btc_coexist *btcoexist,
@@ -3445,11 +3371,11 @@ void ex_btc8723b2ant_media_status_notify(struct btc_coexist *btcoexist,
3445 u8 wifi_central_chnl; 3371 u8 wifi_central_chnl;
3446 3372
3447 if (BTC_MEDIA_CONNECT == type) 3373 if (BTC_MEDIA_CONNECT == type)
3448 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3374 btc_iface_dbg(INTF_NOTIFY,
3449 "[BTCoex], MEDIA connect notify\n"); 3375 "[BTCoex], MEDIA connect notify\n");
3450 else 3376 else
3451 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3377 btc_iface_dbg(INTF_NOTIFY,
3452 "[BTCoex], MEDIA disconnect notify\n"); 3378 "[BTCoex], MEDIA disconnect notify\n");
3453 3379
3454 /* only 2.4G we need to inform bt the chnl mask */ 3380 /* only 2.4G we need to inform bt the chnl mask */
3455 btcoexist->btc_get(btcoexist, 3381 btcoexist->btc_get(btcoexist,
@@ -3470,10 +3396,10 @@ void ex_btc8723b2ant_media_status_notify(struct btc_coexist *btcoexist,
3470 coex_dm->wifi_chnl_info[1] = h2c_parameter[1]; 3396 coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
3471 coex_dm->wifi_chnl_info[2] = h2c_parameter[2]; 3397 coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
3472 3398
3473 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, 3399 btc_alg_dbg(ALGO_TRACE_FW_EXEC,
3474 "[BTCoex], FW write 0x66=0x%x\n", 3400 "[BTCoex], FW write 0x66=0x%x\n",
3475 h2c_parameter[0] << 16 | h2c_parameter[1] << 8 | 3401 h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
3476 h2c_parameter[2]); 3402 h2c_parameter[2]);
3477 3403
3478 btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter); 3404 btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
3479} 3405}
@@ -3482,8 +3408,8 @@ void ex_btc8723b2ant_special_packet_notify(struct btc_coexist *btcoexist,
3482 u8 type) 3408 u8 type)
3483{ 3409{
3484 if (type == BTC_PACKET_DHCP) 3410 if (type == BTC_PACKET_DHCP)
3485 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3411 btc_iface_dbg(INTF_NOTIFY,
3486 "[BTCoex], DHCP Packet notify\n"); 3412 "[BTCoex], DHCP Packet notify\n");
3487} 3413}
3488 3414
3489void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist, 3415void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
@@ -3501,25 +3427,24 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
3501 rsp_source = BT_INFO_SRC_8723B_2ANT_WIFI_FW; 3427 rsp_source = BT_INFO_SRC_8723B_2ANT_WIFI_FW;
3502 coex_sta->bt_info_c2h_cnt[rsp_source]++; 3428 coex_sta->bt_info_c2h_cnt[rsp_source]++;
3503 3429
3504 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3430 btc_iface_dbg(INTF_NOTIFY,
3505 "[BTCoex], Bt info[%d], length=%d, hex data=[", 3431 "[BTCoex], Bt info[%d], length=%d, hex data=[",
3506 rsp_source, length); 3432 rsp_source, length);
3507 for (i = 0; i < length; i++) { 3433 for (i = 0; i < length; i++) {
3508 coex_sta->bt_info_c2h[rsp_source][i] = tmpbuf[i]; 3434 coex_sta->bt_info_c2h[rsp_source][i] = tmpbuf[i];
3509 if (i == 1) 3435 if (i == 1)
3510 bt_info = tmpbuf[i]; 3436 bt_info = tmpbuf[i];
3511 if (i == length-1) 3437 if (i == length-1)
3512 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3438 btc_iface_dbg(INTF_NOTIFY,
3513 "0x%02x]\n", tmpbuf[i]); 3439 "0x%02x]\n", tmpbuf[i]);
3514 else 3440 else
3515 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3441 btc_iface_dbg(INTF_NOTIFY,
3516 "0x%02x, ", tmpbuf[i]); 3442 "0x%02x, ", tmpbuf[i]);
3517 } 3443 }
3518 3444
3519 if (btcoexist->manual_control) { 3445 if (btcoexist->manual_control) {
3520 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3446 btc_alg_dbg(ALGO_TRACE,
3521 "[BTCoex], BtInfoNotify(), " 3447 "[BTCoex], BtInfoNotify(), return for Manual CTRL<===\n");
3522 "return for Manual CTRL<===\n");
3523 return; 3448 return;
3524 } 3449 }
3525 3450
@@ -3537,9 +3462,8 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
3537 because bt is reset and loss of the info. 3462 because bt is reset and loss of the info.
3538 */ 3463 */
3539 if ((coex_sta->bt_info_ext & BIT1)) { 3464 if ((coex_sta->bt_info_ext & BIT1)) {
3540 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3465 btc_alg_dbg(ALGO_TRACE,
3541 "[BTCoex], BT ext info bit1 check," 3466 "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
3542 " send wifi BW&Chnl to BT!!\n");
3543 btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, 3467 btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
3544 &wifi_connected); 3468 &wifi_connected);
3545 if (wifi_connected) 3469 if (wifi_connected)
@@ -3553,9 +3477,8 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
3553 } 3477 }
3554 3478
3555 if ((coex_sta->bt_info_ext & BIT3)) { 3479 if ((coex_sta->bt_info_ext & BIT3)) {
3556 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3480 btc_alg_dbg(ALGO_TRACE,
3557 "[BTCoex], BT ext info bit3 check, " 3481 "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n");
3558 "set BT NOT to ignore Wlan active!!\n");
3559 btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, 3482 btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC,
3560 false); 3483 false);
3561 } else { 3484 } else {
@@ -3608,28 +3531,26 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
3608 3531
3609 if (!(bt_info & BT_INFO_8723B_2ANT_B_CONNECTION)) { 3532 if (!(bt_info & BT_INFO_8723B_2ANT_B_CONNECTION)) {
3610 coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE; 3533 coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE;
3611 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3534 btc_alg_dbg(ALGO_TRACE,
3612 "[BTCoex], BtInfoNotify(), " 3535 "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n");
3613 "BT Non-Connected idle!!!\n");
3614 /* connection exists but no busy */ 3536 /* connection exists but no busy */
3615 } else if (bt_info == BT_INFO_8723B_2ANT_B_CONNECTION) { 3537 } else if (bt_info == BT_INFO_8723B_2ANT_B_CONNECTION) {
3616 coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE; 3538 coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE;
3617 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3539 btc_alg_dbg(ALGO_TRACE,
3618 "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n"); 3540 "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
3619 } else if ((bt_info & BT_INFO_8723B_2ANT_B_SCO_ESCO) || 3541 } else if ((bt_info & BT_INFO_8723B_2ANT_B_SCO_ESCO) ||
3620 (bt_info & BT_INFO_8723B_2ANT_B_SCO_BUSY)) { 3542 (bt_info & BT_INFO_8723B_2ANT_B_SCO_BUSY)) {
3621 coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_SCO_BUSY; 3543 coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_SCO_BUSY;
3622 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3544 btc_alg_dbg(ALGO_TRACE,
3623 "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n"); 3545 "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
3624 } else if (bt_info&BT_INFO_8723B_2ANT_B_ACL_BUSY) { 3546 } else if (bt_info&BT_INFO_8723B_2ANT_B_ACL_BUSY) {
3625 coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_ACL_BUSY; 3547 coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_ACL_BUSY;
3626 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3548 btc_alg_dbg(ALGO_TRACE,
3627 "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n"); 3549 "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
3628 } else { 3550 } else {
3629 coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_MAX; 3551 coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_MAX;
3630 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3552 btc_alg_dbg(ALGO_TRACE,
3631 "[BTCoex], BtInfoNotify(), " 3553 "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n");
3632 "BT Non-Defined state!!!\n");
3633 } 3554 }
3634 3555
3635 if ((BT_8723B_2ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) || 3556 if ((BT_8723B_2ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
@@ -3652,7 +3573,7 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
3652 3573
3653void ex_btc8723b2ant_halt_notify(struct btc_coexist *btcoexist) 3574void ex_btc8723b2ant_halt_notify(struct btc_coexist *btcoexist)
3654{ 3575{
3655 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Halt notify\n"); 3576 btc_iface_dbg(INTF_NOTIFY, "[BTCoex], Halt notify\n");
3656 3577
3657 btc8723b2ant_wifioff_hwcfg(btcoexist); 3578 btc8723b2ant_wifioff_hwcfg(btcoexist);
3658 btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true); 3579 btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
@@ -3666,33 +3587,31 @@ void ex_btc8723b2ant_periodical(struct btc_coexist *btcoexist)
3666 static u8 dis_ver_info_cnt; 3587 static u8 dis_ver_info_cnt;
3667 u32 fw_ver = 0, bt_patch_ver = 0; 3588 u32 fw_ver = 0, bt_patch_ver = 0;
3668 3589
3669 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3590 btc_alg_dbg(ALGO_TRACE,
3670 "[BTCoex], ==========================" 3591 "[BTCoex], ==========================Periodical===========================\n");
3671 "Periodical===========================\n");
3672 3592
3673 if (dis_ver_info_cnt <= 5) { 3593 if (dis_ver_info_cnt <= 5) {
3674 dis_ver_info_cnt += 1; 3594 dis_ver_info_cnt += 1;
3675 BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, 3595 btc_iface_dbg(INTF_INIT,
3676 "[BTCoex], ****************************" 3596 "[BTCoex], ****************************************************************\n");
3677 "************************************\n"); 3597 btc_iface_dbg(INTF_INIT,
3678 BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, 3598 "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
3679 "[BTCoex], Ant PG Num/ Ant Mech/ " 3599 board_info->pg_ant_num,
3680 "Ant Pos = %d/ %d/ %d\n", board_info->pg_ant_num, 3600 board_info->btdm_ant_num,
3681 board_info->btdm_ant_num, board_info->btdm_ant_pos); 3601 board_info->btdm_ant_pos);
3682 BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, 3602 btc_iface_dbg(INTF_INIT,
3683 "[BTCoex], BT stack/ hci ext ver = %s / %d\n", 3603 "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
3684 ((stack_info->profile_notified) ? "Yes" : "No"), 3604 stack_info->profile_notified ? "Yes" : "No",
3685 stack_info->hci_version); 3605 stack_info->hci_version);
3686 btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, 3606 btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
3687 &bt_patch_ver); 3607 &bt_patch_ver);
3688 btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); 3608 btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
3689 BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, 3609 btc_iface_dbg(INTF_INIT,
3690 "[BTCoex], CoexVer/ fw_ver/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n", 3610 "[BTCoex], CoexVer/ fw_ver/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
3691 glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant, 3611 glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant,
3692 fw_ver, bt_patch_ver, bt_patch_ver); 3612 fw_ver, bt_patch_ver, bt_patch_ver);
3693 BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, 3613 btc_iface_dbg(INTF_INIT,
3694 "[BTCoex], *****************************" 3614 "[BTCoex], ****************************************************************\n");
3695 "***********************************\n");
3696 } 3615 }
3697 3616
3698#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 0) 3617#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 0)
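
The hunks above for halbtc8723b2ant.c all follow one pattern: every BTC_PRINT(BTC_MSG_ALGORITHM, ...) call becomes btc_alg_dbg(...), every BTC_PRINT(BTC_MSG_INTERFACE, ...) call becomes btc_iface_dbg(...), and format strings that had been split across several source lines are re-joined, in line with the kernel coding-style rule against breaking user-visible strings. The sketch below is only a model of that conversion: the macro and flag names are taken from the diff itself, but the wrapper bodies, the btc_dbg_mask table, the flag values and the stdio fallback are assumptions standing in for the real btcoexist definitions, which this diff does not include.

/*
 * Illustrative model only: BTC_PRINT and the btc_dbg_mask table are
 * stand-ins for the real btcoexist debug plumbing, which may differ.
 */
#include <stdio.h>

enum btc_msg_type { BTC_MSG_ALGORITHM, BTC_MSG_INTERFACE, BTC_MSG_MAX };

#define ALGO_TRACE	0x1	/* assumed flag values for the sketch */
#define INTF_NOTIFY	0x2

static unsigned int btc_dbg_mask[BTC_MSG_MAX] = { ~0u, ~0u };

#define BTC_PRINT(msg_type, dbg_flag, fmt, ...)				\
	do {								\
		if (btc_dbg_mask[msg_type] & (dbg_flag))		\
			printf(fmt, ##__VA_ARGS__);			\
	} while (0)

/* The new helpers simply bake the message class into the name. */
#define btc_alg_dbg(dbg_flag, fmt, ...)					\
	BTC_PRINT(BTC_MSG_ALGORITHM, dbg_flag, fmt, ##__VA_ARGS__)
#define btc_iface_dbg(dbg_flag, fmt, ...)				\
	BTC_PRINT(BTC_MSG_INTERFACE, dbg_flag, fmt, ##__VA_ARGS__)

int main(void)
{
	/* old style, as removed on the left-hand side of the hunks */
	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
		  "[BTCoex], Algorithm = %d\n", 3);

	/* new style, as added on the right-hand side */
	btc_alg_dbg(ALGO_TRACE, "[BTCoex], Algorithm = %d\n", 3);
	btc_iface_dbg(INTF_NOTIFY, "[BTCoex], Halt notify\n");
	return 0;
}

The conversion itself is mechanical: the dbg_flag argument and the format string are unchanged, only the leading BTC_MSG_* class argument moves into the wrapper name.
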
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
index 9cecf174a37d..3ce47c70bfa4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
@@ -76,28 +76,28 @@ static u8 halbtc8821a1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
76 if (bt_rssi >= (rssi_thresh + 76 if (bt_rssi >= (rssi_thresh +
77 BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) { 77 BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
78 bt_rssi_state = BTC_RSSI_STATE_HIGH; 78 bt_rssi_state = BTC_RSSI_STATE_HIGH;
79 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 79 btc_alg_dbg(ALGO_BT_RSSI_STATE,
80 "[BTCoex], BT Rssi state switch to High\n"); 80 "[BTCoex], BT Rssi state switch to High\n");
81 } else { 81 } else {
82 bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; 82 bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
83 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 83 btc_alg_dbg(ALGO_BT_RSSI_STATE,
84 "[BTCoex], BT Rssi state stay at Low\n"); 84 "[BTCoex], BT Rssi state stay at Low\n");
85 } 85 }
86 } else { 86 } else {
87 if (bt_rssi < rssi_thresh) { 87 if (bt_rssi < rssi_thresh) {
88 bt_rssi_state = BTC_RSSI_STATE_LOW; 88 bt_rssi_state = BTC_RSSI_STATE_LOW;
89 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 89 btc_alg_dbg(ALGO_BT_RSSI_STATE,
90 "[BTCoex], BT Rssi state switch to Low\n"); 90 "[BTCoex], BT Rssi state switch to Low\n");
91 } else { 91 } else {
92 bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH; 92 bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
93 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 93 btc_alg_dbg(ALGO_BT_RSSI_STATE,
94 "[BTCoex], BT Rssi state stay at High\n"); 94 "[BTCoex], BT Rssi state stay at High\n");
95 } 95 }
96 } 96 }
97 } else if (level_num == 3) { 97 } else if (level_num == 3) {
98 if (rssi_thresh > rssi_thresh1) { 98 if (rssi_thresh > rssi_thresh1) {
99 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 99 btc_alg_dbg(ALGO_BT_RSSI_STATE,
100 "[BTCoex], BT Rssi thresh error!!\n"); 100 "[BTCoex], BT Rssi thresh error!!\n");
101 return coex_sta->pre_bt_rssi_state; 101 return coex_sta->pre_bt_rssi_state;
102 } 102 }
103 103
@@ -106,12 +106,12 @@ static u8 halbtc8821a1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
106 if (bt_rssi >= (rssi_thresh + 106 if (bt_rssi >= (rssi_thresh +
107 BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) { 107 BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
108 bt_rssi_state = BTC_RSSI_STATE_MEDIUM; 108 bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
109 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 109 btc_alg_dbg(ALGO_BT_RSSI_STATE,
110 "[BTCoex], BT Rssi state switch to Medium\n"); 110 "[BTCoex], BT Rssi state switch to Medium\n");
111 } else { 111 } else {
112 bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; 112 bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
113 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 113 btc_alg_dbg(ALGO_BT_RSSI_STATE,
114 "[BTCoex], BT Rssi state stay at Low\n"); 114 "[BTCoex], BT Rssi state stay at Low\n");
115 } 115 }
116 } else if ((coex_sta->pre_bt_rssi_state == 116 } else if ((coex_sta->pre_bt_rssi_state ==
117 BTC_RSSI_STATE_MEDIUM) || 117 BTC_RSSI_STATE_MEDIUM) ||
@@ -120,26 +120,26 @@ static u8 halbtc8821a1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
120 if (bt_rssi >= (rssi_thresh1 + 120 if (bt_rssi >= (rssi_thresh1 +
121 BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) { 121 BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
122 bt_rssi_state = BTC_RSSI_STATE_HIGH; 122 bt_rssi_state = BTC_RSSI_STATE_HIGH;
123 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 123 btc_alg_dbg(ALGO_BT_RSSI_STATE,
124 "[BTCoex], BT Rssi state switch to High\n"); 124 "[BTCoex], BT Rssi state switch to High\n");
125 } else if (bt_rssi < rssi_thresh) { 125 } else if (bt_rssi < rssi_thresh) {
126 bt_rssi_state = BTC_RSSI_STATE_LOW; 126 bt_rssi_state = BTC_RSSI_STATE_LOW;
127 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 127 btc_alg_dbg(ALGO_BT_RSSI_STATE,
128 "[BTCoex], BT Rssi state switch to Low\n"); 128 "[BTCoex], BT Rssi state switch to Low\n");
129 } else { 129 } else {
130 bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM; 130 bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
131 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 131 btc_alg_dbg(ALGO_BT_RSSI_STATE,
132 "[BTCoex], BT Rssi state stay at Medium\n"); 132 "[BTCoex], BT Rssi state stay at Medium\n");
133 } 133 }
134 } else { 134 } else {
135 if (bt_rssi < rssi_thresh1) { 135 if (bt_rssi < rssi_thresh1) {
136 bt_rssi_state = BTC_RSSI_STATE_MEDIUM; 136 bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
137 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 137 btc_alg_dbg(ALGO_BT_RSSI_STATE,
138 "[BTCoex], BT Rssi state switch to Medium\n"); 138 "[BTCoex], BT Rssi state switch to Medium\n");
139 } else { 139 } else {
140 bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH; 140 bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
141 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 141 btc_alg_dbg(ALGO_BT_RSSI_STATE,
142 "[BTCoex], BT Rssi state stay at High\n"); 142 "[BTCoex], BT Rssi state stay at High\n");
143 } 143 }
144 } 144 }
145 } 145 }
@@ -165,32 +165,28 @@ static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist,
165 if (wifi_rssi >= 165 if (wifi_rssi >=
166 (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) { 166 (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
167 wifi_rssi_state = BTC_RSSI_STATE_HIGH; 167 wifi_rssi_state = BTC_RSSI_STATE_HIGH;
168 BTC_PRINT(BTC_MSG_ALGORITHM, 168 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
169 ALGO_WIFI_RSSI_STATE, 169 "[BTCoex], wifi RSSI state switch to High\n");
170 "[BTCoex], wifi RSSI state switch to High\n");
171 } else { 170 } else {
172 wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW; 171 wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
173 BTC_PRINT(BTC_MSG_ALGORITHM, 172 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
174 ALGO_WIFI_RSSI_STATE, 173 "[BTCoex], wifi RSSI state stay at Low\n");
175 "[BTCoex], wifi RSSI state stay at Low\n");
176 } 174 }
177 } else { 175 } else {
178 if (wifi_rssi < rssi_thresh) { 176 if (wifi_rssi < rssi_thresh) {
179 wifi_rssi_state = BTC_RSSI_STATE_LOW; 177 wifi_rssi_state = BTC_RSSI_STATE_LOW;
180 BTC_PRINT(BTC_MSG_ALGORITHM, 178 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
181 ALGO_WIFI_RSSI_STATE, 179 "[BTCoex], wifi RSSI state switch to Low\n");
182 "[BTCoex], wifi RSSI state switch to Low\n");
183 } else { 180 } else {
184 wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH; 181 wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
185 BTC_PRINT(BTC_MSG_ALGORITHM, 182 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
186 ALGO_WIFI_RSSI_STATE, 183 "[BTCoex], wifi RSSI state stay at High\n");
187 "[BTCoex], wifi RSSI state stay at High\n");
188 } 184 }
189 } 185 }
190 } else if (level_num == 3) { 186 } else if (level_num == 3) {
191 if (rssi_thresh > rssi_thresh1) { 187 if (rssi_thresh > rssi_thresh1) {
192 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, 188 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
193 "[BTCoex], wifi RSSI thresh error!!\n"); 189 "[BTCoex], wifi RSSI thresh error!!\n");
194 return coex_sta->pre_wifi_rssi_state[index]; 190 return coex_sta->pre_wifi_rssi_state[index];
195 } 191 }
196 192
@@ -201,14 +197,12 @@ static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist,
201 if (wifi_rssi >= 197 if (wifi_rssi >=
202 (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) { 198 (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
203 wifi_rssi_state = BTC_RSSI_STATE_MEDIUM; 199 wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
204 BTC_PRINT(BTC_MSG_ALGORITHM, 200 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
205 ALGO_WIFI_RSSI_STATE, 201 "[BTCoex], wifi RSSI state switch to Medium\n");
206 "[BTCoex], wifi RSSI state switch to Medium\n");
207 } else { 202 } else {
208 wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW; 203 wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
209 BTC_PRINT(BTC_MSG_ALGORITHM, 204 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
210 ALGO_WIFI_RSSI_STATE, 205 "[BTCoex], wifi RSSI state stay at Low\n");
211 "[BTCoex], wifi RSSI state stay at Low\n");
212 } 206 }
213 } else if ((coex_sta->pre_wifi_rssi_state[index] == 207 } else if ((coex_sta->pre_wifi_rssi_state[index] ==
214 BTC_RSSI_STATE_MEDIUM) || 208 BTC_RSSI_STATE_MEDIUM) ||
@@ -218,31 +212,26 @@ static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist,
218 (rssi_thresh1 + 212 (rssi_thresh1 +
219 BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) { 213 BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
220 wifi_rssi_state = BTC_RSSI_STATE_HIGH; 214 wifi_rssi_state = BTC_RSSI_STATE_HIGH;
221 BTC_PRINT(BTC_MSG_ALGORITHM, 215 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
222 ALGO_WIFI_RSSI_STATE, 216 "[BTCoex], wifi RSSI state switch to High\n");
223 "[BTCoex], wifi RSSI state switch to High\n");
224 } else if (wifi_rssi < rssi_thresh) { 217 } else if (wifi_rssi < rssi_thresh) {
225 wifi_rssi_state = BTC_RSSI_STATE_LOW; 218 wifi_rssi_state = BTC_RSSI_STATE_LOW;
226 BTC_PRINT(BTC_MSG_ALGORITHM, 219 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
227 ALGO_WIFI_RSSI_STATE, 220 "[BTCoex], wifi RSSI state switch to Low\n");
228 "[BTCoex], wifi RSSI state switch to Low\n");
229 } else { 221 } else {
230 wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM; 222 wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
231 BTC_PRINT(BTC_MSG_ALGORITHM, 223 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
232 ALGO_WIFI_RSSI_STATE, 224 "[BTCoex], wifi RSSI state stay at Medium\n");
233 "[BTCoex], wifi RSSI state stay at Medium\n");
234 } 225 }
235 } else { 226 } else {
236 if (wifi_rssi < rssi_thresh1) { 227 if (wifi_rssi < rssi_thresh1) {
237 wifi_rssi_state = BTC_RSSI_STATE_MEDIUM; 228 wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
238 BTC_PRINT(BTC_MSG_ALGORITHM, 229 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
239 ALGO_WIFI_RSSI_STATE, 230 "[BTCoex], wifi RSSI state switch to Medium\n");
240 "[BTCoex], wifi RSSI state switch to Medium\n");
241 } else { 231 } else {
242 wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH; 232 wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
243 BTC_PRINT(BTC_MSG_ALGORITHM, 233 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
244 ALGO_WIFI_RSSI_STATE, 234 "[BTCoex], wifi RSSI state stay at High\n");
245 "[BTCoex], wifi RSSI state stay at High\n");
246 } 235 }
247 } 236 }
248 } 237 }
@@ -431,9 +420,9 @@ static void halbtc8821a1ant_query_bt_info(struct btc_coexist *btcoexist)
431 420
432 h2c_parameter[0] |= BIT0; /* trigger*/ 421 h2c_parameter[0] |= BIT0; /* trigger*/
433 422
434 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, 423 btc_alg_dbg(ALGO_TRACE_FW_EXEC,
435 "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n", 424 "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
436 h2c_parameter[0]); 425 h2c_parameter[0]);
437 426
438 btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter); 427 btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
439} 428}
@@ -504,8 +493,8 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
504 btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); 493 btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
505 494
506 if (!bt_link_info->bt_link_exist) { 495 if (!bt_link_info->bt_link_exist) {
507 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 496 btc_alg_dbg(ALGO_TRACE,
508 "[BTCoex], No BT link exists!!!\n"); 497 "[BTCoex], No BT link exists!!!\n");
509 return algorithm; 498 return algorithm;
510 } 499 }
511 500
@@ -520,26 +509,26 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
520 509
521 if (num_of_diff_profile == 1) { 510 if (num_of_diff_profile == 1) {
522 if (bt_link_info->sco_exist) { 511 if (bt_link_info->sco_exist) {
523 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 512 btc_alg_dbg(ALGO_TRACE,
524 "[BTCoex], BT Profile = SCO only\n"); 513 "[BTCoex], BT Profile = SCO only\n");
525 algorithm = BT_8821A_1ANT_COEX_ALGO_SCO; 514 algorithm = BT_8821A_1ANT_COEX_ALGO_SCO;
526 } else { 515 } else {
527 if (bt_link_info->hid_exist) { 516 if (bt_link_info->hid_exist) {
528 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 517 btc_alg_dbg(ALGO_TRACE,
529 "[BTCoex], BT Profile = HID only\n"); 518 "[BTCoex], BT Profile = HID only\n");
530 algorithm = BT_8821A_1ANT_COEX_ALGO_HID; 519 algorithm = BT_8821A_1ANT_COEX_ALGO_HID;
531 } else if (bt_link_info->a2dp_exist) { 520 } else if (bt_link_info->a2dp_exist) {
532 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 521 btc_alg_dbg(ALGO_TRACE,
533 "[BTCoex], BT Profile = A2DP only\n"); 522 "[BTCoex], BT Profile = A2DP only\n");
534 algorithm = BT_8821A_1ANT_COEX_ALGO_A2DP; 523 algorithm = BT_8821A_1ANT_COEX_ALGO_A2DP;
535 } else if (bt_link_info->pan_exist) { 524 } else if (bt_link_info->pan_exist) {
536 if (bt_hs_on) { 525 if (bt_hs_on) {
537 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 526 btc_alg_dbg(ALGO_TRACE,
538 "[BTCoex], BT Profile = PAN(HS) only\n"); 527 "[BTCoex], BT Profile = PAN(HS) only\n");
539 algorithm = BT_8821A_1ANT_COEX_ALGO_PANHS; 528 algorithm = BT_8821A_1ANT_COEX_ALGO_PANHS;
540 } else { 529 } else {
541 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 530 btc_alg_dbg(ALGO_TRACE,
542 "[BTCoex], BT Profile = PAN(EDR) only\n"); 531 "[BTCoex], BT Profile = PAN(EDR) only\n");
543 algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR; 532 algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR;
544 } 533 }
545 } 534 }
@@ -547,50 +536,50 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
547 } else if (num_of_diff_profile == 2) { 536 } else if (num_of_diff_profile == 2) {
548 if (bt_link_info->sco_exist) { 537 if (bt_link_info->sco_exist) {
549 if (bt_link_info->hid_exist) { 538 if (bt_link_info->hid_exist) {
550 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 539 btc_alg_dbg(ALGO_TRACE,
551 "[BTCoex], BT Profile = SCO + HID\n"); 540 "[BTCoex], BT Profile = SCO + HID\n");
552 algorithm = BT_8821A_1ANT_COEX_ALGO_HID; 541 algorithm = BT_8821A_1ANT_COEX_ALGO_HID;
553 } else if (bt_link_info->a2dp_exist) { 542 } else if (bt_link_info->a2dp_exist) {
554 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 543 btc_alg_dbg(ALGO_TRACE,
555 "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n"); 544 "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n");
556 algorithm = BT_8821A_1ANT_COEX_ALGO_SCO; 545 algorithm = BT_8821A_1ANT_COEX_ALGO_SCO;
557 } else if (bt_link_info->pan_exist) { 546 } else if (bt_link_info->pan_exist) {
558 if (bt_hs_on) { 547 if (bt_hs_on) {
559 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 548 btc_alg_dbg(ALGO_TRACE,
560 "[BTCoex], BT Profile = SCO + PAN(HS)\n"); 549 "[BTCoex], BT Profile = SCO + PAN(HS)\n");
561 algorithm = BT_8821A_1ANT_COEX_ALGO_SCO; 550 algorithm = BT_8821A_1ANT_COEX_ALGO_SCO;
562 } else { 551 } else {
563 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 552 btc_alg_dbg(ALGO_TRACE,
564 "[BTCoex], BT Profile = SCO + PAN(EDR)\n"); 553 "[BTCoex], BT Profile = SCO + PAN(EDR)\n");
565 algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID; 554 algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
566 } 555 }
567 } 556 }
568 } else { 557 } else {
569 if (bt_link_info->hid_exist && 558 if (bt_link_info->hid_exist &&
570 bt_link_info->a2dp_exist) { 559 bt_link_info->a2dp_exist) {
571 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 560 btc_alg_dbg(ALGO_TRACE,
572 "[BTCoex], BT Profile = HID + A2DP\n"); 561 "[BTCoex], BT Profile = HID + A2DP\n");
573 algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP; 562 algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP;
574 } else if (bt_link_info->hid_exist && 563 } else if (bt_link_info->hid_exist &&
575 bt_link_info->pan_exist) { 564 bt_link_info->pan_exist) {
576 if (bt_hs_on) { 565 if (bt_hs_on) {
577 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 566 btc_alg_dbg(ALGO_TRACE,
578 "[BTCoex], BT Profile = HID + PAN(HS)\n"); 567 "[BTCoex], BT Profile = HID + PAN(HS)\n");
579 algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP; 568 algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP;
580 } else { 569 } else {
581 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 570 btc_alg_dbg(ALGO_TRACE,
582 "[BTCoex], BT Profile = HID + PAN(EDR)\n"); 571 "[BTCoex], BT Profile = HID + PAN(EDR)\n");
583 algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID; 572 algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
584 } 573 }
585 } else if (bt_link_info->pan_exist && 574 } else if (bt_link_info->pan_exist &&
586 bt_link_info->a2dp_exist) { 575 bt_link_info->a2dp_exist) {
587 if (bt_hs_on) { 576 if (bt_hs_on) {
588 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 577 btc_alg_dbg(ALGO_TRACE,
589 "[BTCoex], BT Profile = A2DP + PAN(HS)\n"); 578 "[BTCoex], BT Profile = A2DP + PAN(HS)\n");
590 algorithm = BT_8821A_1ANT_COEX_ALGO_A2DP_PANHS; 579 algorithm = BT_8821A_1ANT_COEX_ALGO_A2DP_PANHS;
591 } else { 580 } else {
592 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 581 btc_alg_dbg(ALGO_TRACE,
593 "[BTCoex], BT Profile = A2DP + PAN(EDR)\n"); 582 "[BTCoex], BT Profile = A2DP + PAN(EDR)\n");
594 algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_A2DP; 583 algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_A2DP;
595 } 584 }
596 } 585 }
@@ -599,29 +588,29 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
599 if (bt_link_info->sco_exist) { 588 if (bt_link_info->sco_exist) {
600 if (bt_link_info->hid_exist && 589 if (bt_link_info->hid_exist &&
601 bt_link_info->a2dp_exist) { 590 bt_link_info->a2dp_exist) {
602 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 591 btc_alg_dbg(ALGO_TRACE,
603 "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n"); 592 "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n");
604 algorithm = BT_8821A_1ANT_COEX_ALGO_HID; 593 algorithm = BT_8821A_1ANT_COEX_ALGO_HID;
605 } else if (bt_link_info->hid_exist && 594 } else if (bt_link_info->hid_exist &&
606 bt_link_info->pan_exist) { 595 bt_link_info->pan_exist) {
607 if (bt_hs_on) { 596 if (bt_hs_on) {
608 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 597 btc_alg_dbg(ALGO_TRACE,
609 "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n"); 598 "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n");
610 algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP; 599 algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP;
611 } else { 600 } else {
612 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 601 btc_alg_dbg(ALGO_TRACE,
613 "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n"); 602 "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n");
614 algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID; 603 algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
615 } 604 }
616 } else if (bt_link_info->pan_exist && 605 } else if (bt_link_info->pan_exist &&
617 bt_link_info->a2dp_exist) { 606 bt_link_info->a2dp_exist) {
618 if (bt_hs_on) { 607 if (bt_hs_on) {
619 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 608 btc_alg_dbg(ALGO_TRACE,
620 "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n"); 609 "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n");
621 algorithm = BT_8821A_1ANT_COEX_ALGO_SCO; 610 algorithm = BT_8821A_1ANT_COEX_ALGO_SCO;
622 } else { 611 } else {
623 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 612 btc_alg_dbg(ALGO_TRACE,
624 "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n"); 613 "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n");
625 algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID; 614 algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
626 } 615 }
627 } 616 }
@@ -630,12 +619,12 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
630 bt_link_info->pan_exist && 619 bt_link_info->pan_exist &&
631 bt_link_info->a2dp_exist) { 620 bt_link_info->a2dp_exist) {
632 if (bt_hs_on) { 621 if (bt_hs_on) {
633 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 622 btc_alg_dbg(ALGO_TRACE,
634 "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n"); 623 "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n");
635 algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP; 624 algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP;
636 } else { 625 } else {
637 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 626 btc_alg_dbg(ALGO_TRACE,
638 "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n"); 627 "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n");
639 algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP_PANEDR; 628 algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP_PANEDR;
640 } 629 }
641 } 630 }
@@ -646,12 +635,12 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
646 bt_link_info->pan_exist && 635 bt_link_info->pan_exist &&
647 bt_link_info->a2dp_exist) { 636 bt_link_info->a2dp_exist) {
648 if (bt_hs_on) { 637 if (bt_hs_on) {
649 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 638 btc_alg_dbg(ALGO_TRACE,
650 "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n"); 639 "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n");
651 640
652 } else { 641 } else {
653 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 642 btc_alg_dbg(ALGO_TRACE,
654 "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n"); 643 "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
655 algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID; 644 algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
656 } 645 }
657 } 646 }
@@ -670,10 +659,10 @@ static void halbtc8821a1ant_set_bt_auto_report(struct btc_coexist *btcoexist,
670 if (enable_auto_report) 659 if (enable_auto_report)
671 h2c_parameter[0] |= BIT0; 660 h2c_parameter[0] |= BIT0;
672 661
673 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, 662 btc_alg_dbg(ALGO_TRACE_FW_EXEC,
674 "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n", 663 "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
675 (enable_auto_report ? "Enabled!!" : "Disabled!!"), 664 (enable_auto_report ? "Enabled!!" : "Disabled!!"),
676 h2c_parameter[0]); 665 h2c_parameter[0]);
677 666
678 btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter); 667 btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
679} 668}
@@ -682,17 +671,16 @@ static void halbtc8821a1ant_bt_auto_report(struct btc_coexist *btcoexist,
682 bool force_exec, 671 bool force_exec,
683 bool enable_auto_report) 672 bool enable_auto_report)
684{ 673{
685 BTC_PRINT(BTC_MSG_ALGORITHM, 674 btc_alg_dbg(ALGO_TRACE_FW, "[BTCoex], %s BT Auto report = %s\n",
686 ALGO_TRACE_FW, "[BTCoex], %s BT Auto report = %s\n", 675 (force_exec ? "force to" : ""), ((enable_auto_report) ?
687 (force_exec ? "force to" : ""), ((enable_auto_report) ? 676 "Enabled" : "Disabled"));
688 "Enabled" : "Disabled"));
689 coex_dm->cur_bt_auto_report = enable_auto_report; 677 coex_dm->cur_bt_auto_report = enable_auto_report;
690 678
691 if (!force_exec) { 679 if (!force_exec) {
692 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 680 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
693 "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n", 681 "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n",
694 coex_dm->pre_bt_auto_report, 682 coex_dm->pre_bt_auto_report,
695 coex_dm->cur_bt_auto_report); 683 coex_dm->cur_bt_auto_report);
696 684
697 if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report) 685 if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report)
698 return; 686 return;
@@ -718,9 +706,9 @@ static void btc8821a1ant_set_sw_pen_tx_rate(struct btc_coexist *btcoexist,
718 h2c_parameter[5] = 0xf9; /*MCS5 or OFDM36*/ 706 h2c_parameter[5] = 0xf9; /*MCS5 or OFDM36*/
719 } 707 }
720 708
721 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, 709 btc_alg_dbg(ALGO_TRACE_FW_EXEC,
722 "[BTCoex], set WiFi Low-Penalty Retry: %s", 710 "[BTCoex], set WiFi Low-Penalty Retry: %s",
723 (low_penalty_ra ? "ON!!" : "OFF!!")); 711 (low_penalty_ra ? "ON!!" : "OFF!!"));
724 712
725 btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter); 713 btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
726} 714}
@@ -743,20 +731,20 @@ static void halbtc8821a1ant_set_coex_table(struct btc_coexist *btcoexist,
743 u32 val0x6c0, u32 val0x6c4, 731 u32 val0x6c0, u32 val0x6c4,
744 u32 val0x6c8, u8 val0x6cc) 732 u32 val0x6c8, u8 val0x6cc)
745{ 733{
746 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, 734 btc_alg_dbg(ALGO_TRACE_SW_EXEC,
747 "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0); 735 "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
748 btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0); 736 btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
749 737
750 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, 738 btc_alg_dbg(ALGO_TRACE_SW_EXEC,
751 "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4); 739 "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
752 btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4); 740 btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
753 741
754 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, 742 btc_alg_dbg(ALGO_TRACE_SW_EXEC,
755 "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8); 743 "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
756 btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8); 744 btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
757 745
758 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, 746 btc_alg_dbg(ALGO_TRACE_SW_EXEC,
759 "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc); 747 "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
760 btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc); 748 btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
761} 749}
762 750
@@ -764,10 +752,10 @@ static void halbtc8821a1ant_coex_table(struct btc_coexist *btcoexist,
764 bool force_exec, u32 val0x6c0, 752 bool force_exec, u32 val0x6c0,
765 u32 val0x6c4, u32 val0x6c8, u8 val0x6cc) 753 u32 val0x6c4, u32 val0x6c8, u8 val0x6cc)
766{ 754{
767 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, 755 btc_alg_dbg(ALGO_TRACE_SW,
768 "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n", 756 "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
769 (force_exec ? "force to" : ""), val0x6c0, val0x6c4, 757 (force_exec ? "force to" : ""), val0x6c0, val0x6c4,
770 val0x6c8, val0x6cc); 758 val0x6c8, val0x6cc);
771 coex_dm->cur_val_0x6c0 = val0x6c0; 759 coex_dm->cur_val_0x6c0 = val0x6c0;
772 coex_dm->cur_val_0x6c4 = val0x6c4; 760 coex_dm->cur_val_0x6c4 = val0x6c4;
773 coex_dm->cur_val_0x6c8 = val0x6c8; 761 coex_dm->cur_val_0x6c8 = val0x6c8;
@@ -839,9 +827,9 @@ static void btc8821a1ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
839 if (enable) 827 if (enable)
840 h2c_parameter[0] |= BIT0; /* function enable*/ 828 h2c_parameter[0] |= BIT0; /* function enable*/
841 829
842 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, 830 btc_alg_dbg(ALGO_TRACE_FW_EXEC,
843 "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n", 831 "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
844 h2c_parameter[0]); 832 h2c_parameter[0]);
845 833
846 btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter); 834 btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
847} 835}
@@ -849,16 +837,16 @@ static void btc8821a1ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
849static void halbtc8821a1ant_ignore_wlan_act(struct btc_coexist *btcoexist, 837static void halbtc8821a1ant_ignore_wlan_act(struct btc_coexist *btcoexist,
850 bool force_exec, bool enable) 838 bool force_exec, bool enable)
851{ 839{
852 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, 840 btc_alg_dbg(ALGO_TRACE_FW,
853 "[BTCoex], %s turn Ignore WlanAct %s\n", 841 "[BTCoex], %s turn Ignore WlanAct %s\n",
854 (force_exec ? "force to" : ""), (enable ? "ON" : "OFF")); 842 (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
855 coex_dm->cur_ignore_wlan_act = enable; 843 coex_dm->cur_ignore_wlan_act = enable;
856 844
857 if (!force_exec) { 845 if (!force_exec) {
858 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 846 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
859 "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n", 847 "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n",
860 coex_dm->pre_ignore_wlan_act, 848 coex_dm->pre_ignore_wlan_act,
861 coex_dm->cur_ignore_wlan_act); 849 coex_dm->cur_ignore_wlan_act);
862 850
863 if (coex_dm->pre_ignore_wlan_act == 851 if (coex_dm->pre_ignore_wlan_act ==
864 coex_dm->cur_ignore_wlan_act) 852 coex_dm->cur_ignore_wlan_act)
@@ -887,13 +875,13 @@ static void halbtc8821a1ant_set_fw_pstdma(struct btc_coexist *btcoexist,
887 coex_dm->ps_tdma_para[3] = byte4; 875 coex_dm->ps_tdma_para[3] = byte4;
888 coex_dm->ps_tdma_para[4] = byte5; 876 coex_dm->ps_tdma_para[4] = byte5;
889 877
890 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, 878 btc_alg_dbg(ALGO_TRACE_FW_EXEC,
891 "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n", 879 "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n",
892 h2c_parameter[0], 880 h2c_parameter[0],
893 h2c_parameter[1]<<24 | 881 h2c_parameter[1] << 24 |
894 h2c_parameter[2]<<16 | 882 h2c_parameter[2] << 16 |
895 h2c_parameter[3]<<8 | 883 h2c_parameter[3] << 8 |
896 h2c_parameter[4]); 884 h2c_parameter[4]);
897 btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter); 885 btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
898} 886}
899 887
@@ -910,22 +898,22 @@ static void halbtc8821a1ant_set_lps_rpwm(struct btc_coexist *btcoexist,
910static void halbtc8821a1ant_lps_rpwm(struct btc_coexist *btcoexist, 898static void halbtc8821a1ant_lps_rpwm(struct btc_coexist *btcoexist,
911 bool force_exec, u8 lps_val, u8 rpwm_val) 899 bool force_exec, u8 lps_val, u8 rpwm_val)
912{ 900{
913 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, 901 btc_alg_dbg(ALGO_TRACE_FW,
914 "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n", 902 "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n",
915 (force_exec ? "force to" : ""), lps_val, rpwm_val); 903 (force_exec ? "force to" : ""), lps_val, rpwm_val);
916 coex_dm->cur_lps = lps_val; 904 coex_dm->cur_lps = lps_val;
917 coex_dm->cur_rpwm = rpwm_val; 905 coex_dm->cur_rpwm = rpwm_val;
918 906
919 if (!force_exec) { 907 if (!force_exec) {
920 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 908 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
921 "[BTCoex], LPS-RxBeaconMode = 0x%x, LPS-RPWM = 0x%x!!\n", 909 "[BTCoex], LPS-RxBeaconMode = 0x%x, LPS-RPWM = 0x%x!!\n",
922 coex_dm->cur_lps, coex_dm->cur_rpwm); 910 coex_dm->cur_lps, coex_dm->cur_rpwm);
923 911
924 if ((coex_dm->pre_lps == coex_dm->cur_lps) && 912 if ((coex_dm->pre_lps == coex_dm->cur_lps) &&
925 (coex_dm->pre_rpwm == coex_dm->cur_rpwm)) { 913 (coex_dm->pre_rpwm == coex_dm->cur_rpwm)) {
926 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 914 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
927 "[BTCoex], LPS-RPWM_Last = 0x%x, LPS-RPWM_Now = 0x%x!!\n", 915 "[BTCoex], LPS-RPWM_Last = 0x%x, LPS-RPWM_Now = 0x%x!!\n",
928 coex_dm->pre_rpwm, coex_dm->cur_rpwm); 916 coex_dm->pre_rpwm, coex_dm->cur_rpwm);
929 917
930 return; 918 return;
931 } 919 }
@@ -939,8 +927,8 @@ static void halbtc8821a1ant_lps_rpwm(struct btc_coexist *btcoexist,
939static void halbtc8821a1ant_sw_mechanism(struct btc_coexist *btcoexist, 927static void halbtc8821a1ant_sw_mechanism(struct btc_coexist *btcoexist,
940 bool low_penalty_ra) 928 bool low_penalty_ra)
941{ 929{
942 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, 930 btc_alg_dbg(ALGO_BT_MONITOR,
943 "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra); 931 "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra);
944 932
945 halbtc8821a1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra); 933 halbtc8821a1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra);
946} 934}
@@ -1036,13 +1024,13 @@ static void halbtc8821a1ant_ps_tdma(struct btc_coexist *btcoexist,
1036 1024
1037 if (!force_exec) { 1025 if (!force_exec) {
1038 if (coex_dm->cur_ps_tdma_on) { 1026 if (coex_dm->cur_ps_tdma_on) {
1039 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 1027 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
1040 "[BTCoex], ********** TDMA(on, %d) **********\n", 1028 "[BTCoex], ********** TDMA(on, %d) **********\n",
1041 coex_dm->cur_ps_tdma); 1029 coex_dm->cur_ps_tdma);
1042 } else { 1030 } else {
1043 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 1031 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
1044 "[BTCoex], ********** TDMA(off, %d) **********\n", 1032 "[BTCoex], ********** TDMA(off, %d) **********\n",
1045 coex_dm->cur_ps_tdma); 1033 coex_dm->cur_ps_tdma);
1046 } 1034 }
1047 if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) && 1035 if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
1048 (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma)) 1036 (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1253,50 +1241,50 @@ static bool halbtc8821a1ant_is_common_action(struct btc_coexist *btcoexist)
1253 if (!wifi_connected && 1241 if (!wifi_connected &&
1254 BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == 1242 BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
1255 coex_dm->bt_status) { 1243 coex_dm->bt_status) {
1256 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1244 btc_alg_dbg(ALGO_TRACE,
1257 "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n"); 1245 "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n");
1258 halbtc8821a1ant_sw_mechanism(btcoexist, false); 1246 halbtc8821a1ant_sw_mechanism(btcoexist, false);
1259 1247
1260 common = true; 1248 common = true;
1261 } else if (wifi_connected && 1249 } else if (wifi_connected &&
1262 (BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == 1250 (BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
1263 coex_dm->bt_status)) { 1251 coex_dm->bt_status)) {
1264 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1252 btc_alg_dbg(ALGO_TRACE,
1265 "[BTCoex], Wifi connected + BT non connected-idle!!\n"); 1253 "[BTCoex], Wifi connected + BT non connected-idle!!\n");
1266 halbtc8821a1ant_sw_mechanism(btcoexist, false); 1254 halbtc8821a1ant_sw_mechanism(btcoexist, false);
1267 1255
1268 common = true; 1256 common = true;
1269 } else if (!wifi_connected && 1257 } else if (!wifi_connected &&
1270 (BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE == 1258 (BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE ==
1271 coex_dm->bt_status)) { 1259 coex_dm->bt_status)) {
1272 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1260 btc_alg_dbg(ALGO_TRACE,
1273 "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n"); 1261 "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n");
1274 halbtc8821a1ant_sw_mechanism(btcoexist, false); 1262 halbtc8821a1ant_sw_mechanism(btcoexist, false);
1275 1263
1276 common = true; 1264 common = true;
1277 } else if (wifi_connected && 1265 } else if (wifi_connected &&
1278 (BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE == 1266 (BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE ==
1279 coex_dm->bt_status)) { 1267 coex_dm->bt_status)) {
1280 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1268 btc_alg_dbg(ALGO_TRACE,
1281 "[BTCoex], Wifi connected + BT connected-idle!!\n"); 1269 "[BTCoex], Wifi connected + BT connected-idle!!\n");
1282 halbtc8821a1ant_sw_mechanism(btcoexist, false); 1270 halbtc8821a1ant_sw_mechanism(btcoexist, false);
1283 1271
1284 common = true; 1272 common = true;
1285 } else if (!wifi_connected && 1273 } else if (!wifi_connected &&
1286 (BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE != 1274 (BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE !=
1287 coex_dm->bt_status)) { 1275 coex_dm->bt_status)) {
1288 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1276 btc_alg_dbg(ALGO_TRACE,
1289 "[BTCoex], Wifi non connected-idle + BT Busy!!\n"); 1277 "[BTCoex], Wifi non connected-idle + BT Busy!!\n");
1290 halbtc8821a1ant_sw_mechanism(btcoexist, false); 1278 halbtc8821a1ant_sw_mechanism(btcoexist, false);
1291 1279
1292 common = true; 1280 common = true;
1293 } else { 1281 } else {
1294 if (wifi_busy) { 1282 if (wifi_busy) {
1295 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1283 btc_alg_dbg(ALGO_TRACE,
1296 "[BTCoex], Wifi Connected-Busy + BT Busy!!\n"); 1284 "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
1297 } else { 1285 } else {
1298 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1286 btc_alg_dbg(ALGO_TRACE,
1299 "[BTCoex], Wifi Connected-Idle + BT Busy!!\n"); 1287 "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
1300 } 1288 }
1301 1289
1302 common = false; 1290 common = false;
@@ -1313,8 +1301,8 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist,
1313 long result; 1301 long result;
1314 u8 retry_count = 0, bt_info_ext; 1302 u8 retry_count = 0, bt_info_ext;
1315 1303
1316 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, 1304 btc_alg_dbg(ALGO_TRACE_FW,
1317 "[BTCoex], TdmaDurationAdjustForAcl()\n"); 1305 "[BTCoex], TdmaDurationAdjustForAcl()\n");
1318 1306
1319 if ((BT_8821A_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN == 1307 if ((BT_8821A_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN ==
1320 wifi_status) || 1308 wifi_status) ||
@@ -1342,8 +1330,8 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist,
1342 1330
1343 if (!coex_dm->auto_tdma_adjust) { 1331 if (!coex_dm->auto_tdma_adjust) {
1344 coex_dm->auto_tdma_adjust = true; 1332 coex_dm->auto_tdma_adjust = true;
1345 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 1333 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
1346 "[BTCoex], first run TdmaDurationAdjust()!!\n"); 1334 "[BTCoex], first run TdmaDurationAdjust()!!\n");
1347 1335
1348 halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2); 1336 halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
1349 coex_dm->tdma_adj_type = 2; 1337 coex_dm->tdma_adj_type = 2;
@@ -1378,9 +1366,8 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist,
1378 up = 0; 1366 up = 0;
1379 dn = 0; 1367 dn = 0;
1380 result = 1; 1368 result = 1;
1381 BTC_PRINT(BTC_MSG_ALGORITHM, 1369 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
1382 ALGO_TRACE_FW_DETAIL, 1370 "[BTCoex], Increase wifi duration!!\n");
1383 "[BTCoex], Increase wifi duration!!\n");
1384 } 1371 }
1385 } else if (retry_count <= 3) { 1372 } else if (retry_count <= 3) {
1386 /* <=3 retry in the last 2-second duration*/ 1373 /* <=3 retry in the last 2-second duration*/
@@ -1410,9 +1397,8 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist,
1410 dn = 0; 1397 dn = 0;
1411 wait_count = 0; 1398 wait_count = 0;
1412 result = -1; 1399 result = -1;
1413 BTC_PRINT(BTC_MSG_ALGORITHM, 1400 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
1414 ALGO_TRACE_FW_DETAIL, 1401 "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
1415 "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
1416 } 1402 }
1417 } else { 1403 } else {
1418 /* retry count > 3, if retry count > 3 happens once, 1404 /* retry count > 3, if retry count > 3 happens once,
@@ -1433,8 +1419,8 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist,
1433 dn = 0; 1419 dn = 0;
1434 wait_count = 0; 1420 wait_count = 0;
1435 result = -1; 1421 result = -1;
1436 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 1422 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
1437 "[BTCoex], Decrease wifi duration for retryCounter>3!!\n"); 1423 "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
1438 } 1424 }
1439 1425
1440 if (result == -1) { 1426 if (result == -1) {
@@ -1479,9 +1465,9 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist,
1479 } 1465 }
1480 } else { 1466 } else {
1481 /*no change*/ 1467 /*no change*/
1482 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 1468 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
1483 "[BTCoex], ********** TDMA(on, %d) **********\n", 1469 "[BTCoex], ********** TDMA(on, %d) **********\n",
1484 coex_dm->cur_ps_tdma); 1470 coex_dm->cur_ps_tdma);
1485 } 1471 }
1486 1472
1487 if (coex_dm->cur_ps_tdma != 1 && 1473 if (coex_dm->cur_ps_tdma != 1 &&
@@ -1603,27 +1589,27 @@ static void btc8821a1ant_mon_bt_en_dis(struct btc_coexist *btcoexist)
1603 bt_disabled = false; 1589 bt_disabled = false;
1604 btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, 1590 btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
1605 &bt_disabled); 1591 &bt_disabled);
1606 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, 1592 btc_alg_dbg(ALGO_BT_MONITOR,
1607 "[BTCoex], BT is enabled !!\n"); 1593 "[BTCoex], BT is enabled !!\n");
1608 } else { 1594 } else {
1609 bt_disable_cnt++; 1595 bt_disable_cnt++;
1610 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, 1596 btc_alg_dbg(ALGO_BT_MONITOR,
1611 "[BTCoex], bt all counters = 0, %d times!!\n", 1597 "[BTCoex], bt all counters = 0, %d times!!\n",
1612 bt_disable_cnt); 1598 bt_disable_cnt);
1613 if (bt_disable_cnt >= 2) { 1599 if (bt_disable_cnt >= 2) {
1614 bt_disabled = true; 1600 bt_disabled = true;
1615 btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, 1601 btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
1616 &bt_disabled); 1602 &bt_disabled);
1617 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, 1603 btc_alg_dbg(ALGO_BT_MONITOR,
1618 "[BTCoex], BT is disabled !!\n"); 1604 "[BTCoex], BT is disabled !!\n");
1619 halbtc8821a1ant_action_wifi_only(btcoexist); 1605 halbtc8821a1ant_action_wifi_only(btcoexist);
1620 } 1606 }
1621 } 1607 }
1622 if (pre_bt_disabled != bt_disabled) { 1608 if (pre_bt_disabled != bt_disabled) {
1623 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, 1609 btc_alg_dbg(ALGO_BT_MONITOR,
1624 "[BTCoex], BT is from %s to %s!!\n", 1610 "[BTCoex], BT is from %s to %s!!\n",
1625 (pre_bt_disabled ? "disabled" : "enabled"), 1611 (pre_bt_disabled ? "disabled" : "enabled"),
1626 (bt_disabled ? "disabled" : "enabled")); 1612 (bt_disabled ? "disabled" : "enabled"));
1627 pre_bt_disabled = bt_disabled; 1613 pre_bt_disabled = bt_disabled;
1628 if (bt_disabled) { 1614 if (bt_disabled) {
1629 btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, 1615 btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS,
@@ -1897,15 +1883,15 @@ static void halbtc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist)
1897 bool scan = false, link = false, roam = false; 1883 bool scan = false, link = false, roam = false;
1898 bool under_4way = false; 1884 bool under_4way = false;
1899 1885
1900 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1886 btc_alg_dbg(ALGO_TRACE,
1901 "[BTCoex], CoexForWifiConnect()===>\n"); 1887 "[BTCoex], CoexForWifiConnect()===>\n");
1902 1888
1903 btcoexist->btc_get(btcoexist, 1889 btcoexist->btc_get(btcoexist,
1904 BTC_GET_BL_WIFI_4_WAY_PROGRESS, &under_4way); 1890 BTC_GET_BL_WIFI_4_WAY_PROGRESS, &under_4way);
1905 if (under_4way) { 1891 if (under_4way) {
1906 btc8821a1ant_act_wifi_conn_sp_pkt(btcoexist); 1892 btc8821a1ant_act_wifi_conn_sp_pkt(btcoexist);
1907 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1893 btc_alg_dbg(ALGO_TRACE,
1908 "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n"); 1894 "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n");
1909 return; 1895 return;
1910 } 1896 }
1911 1897
@@ -1914,8 +1900,8 @@ static void halbtc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist)
1914 btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); 1900 btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
1915 if (scan || link || roam) { 1901 if (scan || link || roam) {
1916 halbtc8821a1ant_action_wifi_connected_scan(btcoexist); 1902 halbtc8821a1ant_action_wifi_connected_scan(btcoexist);
1917 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1903 btc_alg_dbg(ALGO_TRACE,
1918 "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n"); 1904 "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n");
1919 return; 1905 return;
1920 } 1906 }
1921 1907
@@ -1976,58 +1962,58 @@ static void btc8821a1ant_run_sw_coex_mech(struct btc_coexist *btcoexist)
1976 if (!halbtc8821a1ant_is_common_action(btcoexist)) { 1962 if (!halbtc8821a1ant_is_common_action(btcoexist)) {
1977 switch (coex_dm->cur_algorithm) { 1963 switch (coex_dm->cur_algorithm) {
1978 case BT_8821A_1ANT_COEX_ALGO_SCO: 1964 case BT_8821A_1ANT_COEX_ALGO_SCO:
1979 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1965 btc_alg_dbg(ALGO_TRACE,
1980 "[BTCoex], Action algorithm = SCO.\n"); 1966 "[BTCoex], Action algorithm = SCO\n");
1981 halbtc8821a1ant_action_sco(btcoexist); 1967 halbtc8821a1ant_action_sco(btcoexist);
1982 break; 1968 break;
1983 case BT_8821A_1ANT_COEX_ALGO_HID: 1969 case BT_8821A_1ANT_COEX_ALGO_HID:
1984 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1970 btc_alg_dbg(ALGO_TRACE,
1985 "[BTCoex], Action algorithm = HID.\n"); 1971 "[BTCoex], Action algorithm = HID\n");
1986 halbtc8821a1ant_action_hid(btcoexist); 1972 halbtc8821a1ant_action_hid(btcoexist);
1987 break; 1973 break;
1988 case BT_8821A_1ANT_COEX_ALGO_A2DP: 1974 case BT_8821A_1ANT_COEX_ALGO_A2DP:
1989 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1975 btc_alg_dbg(ALGO_TRACE,
1990 "[BTCoex], Action algorithm = A2DP.\n"); 1976 "[BTCoex], Action algorithm = A2DP\n");
1991 halbtc8821a1ant_action_a2dp(btcoexist); 1977 halbtc8821a1ant_action_a2dp(btcoexist);
1992 break; 1978 break;
1993 case BT_8821A_1ANT_COEX_ALGO_A2DP_PANHS: 1979 case BT_8821A_1ANT_COEX_ALGO_A2DP_PANHS:
1994 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1980 btc_alg_dbg(ALGO_TRACE,
1995 "[BTCoex], Action algorithm = A2DP+PAN(HS).\n"); 1981 "[BTCoex], Action algorithm = A2DP+PAN(HS)\n");
1996 halbtc8821a1ant_action_a2dp_pan_hs(btcoexist); 1982 halbtc8821a1ant_action_a2dp_pan_hs(btcoexist);
1997 break; 1983 break;
1998 case BT_8821A_1ANT_COEX_ALGO_PANEDR: 1984 case BT_8821A_1ANT_COEX_ALGO_PANEDR:
1999 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1985 btc_alg_dbg(ALGO_TRACE,
2000 "[BTCoex], Action algorithm = PAN(EDR).\n"); 1986 "[BTCoex], Action algorithm = PAN(EDR)\n");
2001 halbtc8821a1ant_action_pan_edr(btcoexist); 1987 halbtc8821a1ant_action_pan_edr(btcoexist);
2002 break; 1988 break;
2003 case BT_8821A_1ANT_COEX_ALGO_PANHS: 1989 case BT_8821A_1ANT_COEX_ALGO_PANHS:
2004 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1990 btc_alg_dbg(ALGO_TRACE,
2005 "[BTCoex], Action algorithm = HS mode.\n"); 1991 "[BTCoex], Action algorithm = HS mode\n");
2006 halbtc8821a1ant_action_pan_hs(btcoexist); 1992 halbtc8821a1ant_action_pan_hs(btcoexist);
2007 break; 1993 break;
2008 case BT_8821A_1ANT_COEX_ALGO_PANEDR_A2DP: 1994 case BT_8821A_1ANT_COEX_ALGO_PANEDR_A2DP:
2009 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1995 btc_alg_dbg(ALGO_TRACE,
2010 "[BTCoex], Action algorithm = PAN+A2DP.\n"); 1996 "[BTCoex], Action algorithm = PAN+A2DP\n");
2011 halbtc8821a1ant_action_pan_edr_a2dp(btcoexist); 1997 halbtc8821a1ant_action_pan_edr_a2dp(btcoexist);
2012 break; 1998 break;
2013 case BT_8821A_1ANT_COEX_ALGO_PANEDR_HID: 1999 case BT_8821A_1ANT_COEX_ALGO_PANEDR_HID:
2014 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2000 btc_alg_dbg(ALGO_TRACE,
2015 "[BTCoex], Action algorithm = PAN(EDR)+HID.\n"); 2001 "[BTCoex], Action algorithm = PAN(EDR)+HID\n");
2016 halbtc8821a1ant_action_pan_edr_hid(btcoexist); 2002 halbtc8821a1ant_action_pan_edr_hid(btcoexist);
2017 break; 2003 break;
2018 case BT_8821A_1ANT_COEX_ALGO_HID_A2DP_PANEDR: 2004 case BT_8821A_1ANT_COEX_ALGO_HID_A2DP_PANEDR:
2019 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2005 btc_alg_dbg(ALGO_TRACE,
2020 "[BTCoex], Action algorithm = HID+A2DP+PAN.\n"); 2006 "[BTCoex], Action algorithm = HID+A2DP+PAN\n");
2021 btc8821a1ant_action_hid_a2dp_pan_edr(btcoexist); 2007 btc8821a1ant_action_hid_a2dp_pan_edr(btcoexist);
2022 break; 2008 break;
2023 case BT_8821A_1ANT_COEX_ALGO_HID_A2DP: 2009 case BT_8821A_1ANT_COEX_ALGO_HID_A2DP:
2024 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2010 btc_alg_dbg(ALGO_TRACE,
2025 "[BTCoex], Action algorithm = HID+A2DP.\n"); 2011 "[BTCoex], Action algorithm = HID+A2DP\n");
2026 halbtc8821a1ant_action_hid_a2dp(btcoexist); 2012 halbtc8821a1ant_action_hid_a2dp(btcoexist);
2027 break; 2013 break;
2028 default: 2014 default:
2029 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2015 btc_alg_dbg(ALGO_TRACE,
2030 "[BTCoex], Action algorithm = coexist All Off!!\n"); 2016 "[BTCoex], Action algorithm = coexist All Off!!\n");
2031 /*halbtc8821a1ant_coex_all_off(btcoexist);*/ 2017 /*halbtc8821a1ant_coex_all_off(btcoexist);*/
2032 break; 2018 break;
2033 } 2019 }
@@ -2045,31 +2031,31 @@ static void halbtc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
2045 u8 wifi_rssi_state = BTC_RSSI_STATE_HIGH; 2031 u8 wifi_rssi_state = BTC_RSSI_STATE_HIGH;
2046 bool wifi_under_5g = false; 2032 bool wifi_under_5g = false;
2047 2033
2048 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2034 btc_alg_dbg(ALGO_TRACE,
2049 "[BTCoex], RunCoexistMechanism()===>\n"); 2035 "[BTCoex], RunCoexistMechanism()===>\n");
2050 2036
2051 if (btcoexist->manual_control) { 2037 if (btcoexist->manual_control) {
2052 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2038 btc_alg_dbg(ALGO_TRACE,
2053 "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n"); 2039 "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
2054 return; 2040 return;
2055 } 2041 }
2056 2042
2057 if (btcoexist->stop_coex_dm) { 2043 if (btcoexist->stop_coex_dm) {
2058 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2044 btc_alg_dbg(ALGO_TRACE,
2059 "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n"); 2045 "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n");
2060 return; 2046 return;
2061 } 2047 }
2062 2048
2063 if (coex_sta->under_ips) { 2049 if (coex_sta->under_ips) {
2064 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2050 btc_alg_dbg(ALGO_TRACE,
2065 "[BTCoex], wifi is under IPS !!!\n"); 2051 "[BTCoex], wifi is under IPS !!!\n");
2066 return; 2052 return;
2067 } 2053 }
2068 2054
2069 btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g); 2055 btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
2070 if (wifi_under_5g) { 2056 if (wifi_under_5g) {
2071 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2057 btc_alg_dbg(ALGO_TRACE,
2072 "[BTCoex], RunCoexistMechanism(), return for 5G <===\n"); 2058 "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
2073 halbtc8821a1ant_coex_under_5g(btcoexist); 2059 halbtc8821a1ant_coex_under_5g(btcoexist);
2074 return; 2060 return;
2075 } 2061 }
@@ -2135,8 +2121,8 @@ static void halbtc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
2135 if (!wifi_connected) { 2121 if (!wifi_connected) {
2136 bool scan = false, link = false, roam = false; 2122 bool scan = false, link = false, roam = false;
2137 2123
2138 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2124 btc_alg_dbg(ALGO_TRACE,
2139 "[BTCoex], wifi is non connected-idle !!!\n"); 2125 "[BTCoex], wifi is non connected-idle !!!\n");
2140 2126
2141 btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); 2127 btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
2142 btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); 2128 btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2168,8 +2154,8 @@ static void halbtc8821a1ant_init_hw_config(struct btc_coexist *btcoexist,
2168 u8 u1_tmp = 0; 2154 u8 u1_tmp = 0;
2169 bool wifi_under_5g = false; 2155 bool wifi_under_5g = false;
2170 2156
2171 BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, 2157 btc_iface_dbg(INTF_INIT,
2172 "[BTCoex], 1Ant Init HW Config!!\n"); 2158 "[BTCoex], 1Ant Init HW Config!!\n");
2173 2159
2174 if (back_up) { 2160 if (back_up) {
2175 coex_dm->backup_arfr_cnt1 = btcoexist->btc_read_4byte(btcoexist, 2161 coex_dm->backup_arfr_cnt1 = btcoexist->btc_read_4byte(btcoexist,
@@ -2220,8 +2206,8 @@ void ex_halbtc8821a1ant_init_hwconfig(struct btc_coexist *btcoexist)
2220 2206
2221void ex_halbtc8821a1ant_init_coex_dm(struct btc_coexist *btcoexist) 2207void ex_halbtc8821a1ant_init_coex_dm(struct btc_coexist *btcoexist)
2222{ 2208{
2223 BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, 2209 btc_iface_dbg(INTF_INIT,
2224 "[BTCoex], Coex Mechanism Init!!\n"); 2210 "[BTCoex], Coex Mechanism Init!!\n");
2225 2211
2226 btcoexist->stop_coex_dm = false; 2212 btcoexist->stop_coex_dm = false;
2227 2213
@@ -2515,8 +2501,8 @@ void ex_halbtc8821a1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
2515 return; 2501 return;
2516 2502
2517 if (BTC_IPS_ENTER == type) { 2503 if (BTC_IPS_ENTER == type) {
2518 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 2504 btc_iface_dbg(INTF_NOTIFY,
2519 "[BTCoex], IPS ENTER notify\n"); 2505 "[BTCoex], IPS ENTER notify\n");
2520 coex_sta->under_ips = true; 2506 coex_sta->under_ips = true;
2521 halbtc8821a1ant_set_ant_path(btcoexist, 2507 halbtc8821a1ant_set_ant_path(btcoexist,
2522 BTC_ANT_PATH_BT, false, true); 2508 BTC_ANT_PATH_BT, false, true);
@@ -2525,8 +2511,8 @@ void ex_halbtc8821a1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
2525 halbtc8821a1ant_coex_table_with_type(btcoexist, 2511 halbtc8821a1ant_coex_table_with_type(btcoexist,
2526 NORMAL_EXEC, 0); 2512 NORMAL_EXEC, 0);
2527 } else if (BTC_IPS_LEAVE == type) { 2513 } else if (BTC_IPS_LEAVE == type) {
2528 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 2514 btc_iface_dbg(INTF_NOTIFY,
2529 "[BTCoex], IPS LEAVE notify\n"); 2515 "[BTCoex], IPS LEAVE notify\n");
2530 coex_sta->under_ips = false; 2516 coex_sta->under_ips = false;
2531 2517
2532 halbtc8821a1ant_run_coexist_mechanism(btcoexist); 2518 halbtc8821a1ant_run_coexist_mechanism(btcoexist);
@@ -2539,12 +2525,12 @@ void ex_halbtc8821a1ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
2539 return; 2525 return;
2540 2526
2541 if (BTC_LPS_ENABLE == type) { 2527 if (BTC_LPS_ENABLE == type) {
2542 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 2528 btc_iface_dbg(INTF_NOTIFY,
2543 "[BTCoex], LPS ENABLE notify\n"); 2529 "[BTCoex], LPS ENABLE notify\n");
2544 coex_sta->under_Lps = true; 2530 coex_sta->under_Lps = true;
2545 } else if (BTC_LPS_DISABLE == type) { 2531 } else if (BTC_LPS_DISABLE == type) {
2546 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 2532 btc_iface_dbg(INTF_NOTIFY,
2547 "[BTCoex], LPS DISABLE notify\n"); 2533 "[BTCoex], LPS DISABLE notify\n");
2548 coex_sta->under_Lps = false; 2534 coex_sta->under_Lps = false;
2549 } 2535 }
2550} 2536}
@@ -2574,8 +2560,8 @@ void ex_halbtc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
2574 } 2560 }
2575 2561
2576 if (BTC_SCAN_START == type) { 2562 if (BTC_SCAN_START == type) {
2577 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 2563 btc_iface_dbg(INTF_NOTIFY,
2578 "[BTCoex], SCAN START notify\n"); 2564 "[BTCoex], SCAN START notify\n");
2579 if (!wifi_connected) { 2565 if (!wifi_connected) {
2580 /* non-connected scan*/ 2566 /* non-connected scan*/
2581 btc8821a1ant_act_wifi_not_conn_scan(btcoexist); 2567 btc8821a1ant_act_wifi_not_conn_scan(btcoexist);
@@ -2584,8 +2570,8 @@ void ex_halbtc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
2584 halbtc8821a1ant_action_wifi_connected_scan(btcoexist); 2570 halbtc8821a1ant_action_wifi_connected_scan(btcoexist);
2585 } 2571 }
2586 } else if (BTC_SCAN_FINISH == type) { 2572 } else if (BTC_SCAN_FINISH == type) {
2587 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 2573 btc_iface_dbg(INTF_NOTIFY,
2588 "[BTCoex], SCAN FINISH notify\n"); 2574 "[BTCoex], SCAN FINISH notify\n");
2589 if (!wifi_connected) { 2575 if (!wifi_connected) {
2590 /* non-connected scan*/ 2576 /* non-connected scan*/
2591 halbtc8821a1ant_action_wifi_not_connected(btcoexist); 2577 halbtc8821a1ant_action_wifi_not_connected(btcoexist);
@@ -2614,12 +2600,12 @@ void ex_halbtc8821a1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
2614 } 2600 }
2615 2601
2616 if (BTC_ASSOCIATE_START == type) { 2602 if (BTC_ASSOCIATE_START == type) {
2617 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 2603 btc_iface_dbg(INTF_NOTIFY,
2618 "[BTCoex], CONNECT START notify\n"); 2604 "[BTCoex], CONNECT START notify\n");
2619 btc8821a1ant_act_wifi_not_conn_scan(btcoexist); 2605 btc8821a1ant_act_wifi_not_conn_scan(btcoexist);
2620 } else if (BTC_ASSOCIATE_FINISH == type) { 2606 } else if (BTC_ASSOCIATE_FINISH == type) {
2621 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 2607 btc_iface_dbg(INTF_NOTIFY,
2622 "[BTCoex], CONNECT FINISH notify\n"); 2608 "[BTCoex], CONNECT FINISH notify\n");
2623 2609
2624 btcoexist->btc_get(btcoexist, 2610 btcoexist->btc_get(btcoexist,
2625 BTC_GET_BL_WIFI_CONNECTED, &wifi_connected); 2611 BTC_GET_BL_WIFI_CONNECTED, &wifi_connected);
@@ -2645,11 +2631,11 @@ void ex_halbtc8821a1ant_media_status_notify(struct btc_coexist *btcoexist,
2645 return; 2631 return;
2646 2632
2647 if (BTC_MEDIA_CONNECT == type) { 2633 if (BTC_MEDIA_CONNECT == type) {
2648 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 2634 btc_iface_dbg(INTF_NOTIFY,
2649 "[BTCoex], MEDIA connect notify\n"); 2635 "[BTCoex], MEDIA connect notify\n");
2650 } else { 2636 } else {
2651 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 2637 btc_iface_dbg(INTF_NOTIFY,
2652 "[BTCoex], MEDIA disconnect notify\n"); 2638 "[BTCoex], MEDIA disconnect notify\n");
2653 } 2639 }
2654 2640
2655 /* only 2.4G we need to inform bt the chnl mask*/ 2641 /* only 2.4G we need to inform bt the chnl mask*/
@@ -2672,9 +2658,11 @@ void ex_halbtc8821a1ant_media_status_notify(struct btc_coexist *btcoexist,
2672 coex_dm->wifi_chnl_info[1] = h2c_parameter[1]; 2658 coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
2673 coex_dm->wifi_chnl_info[2] = h2c_parameter[2]; 2659 coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
2674 2660
2675 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, 2661 btc_alg_dbg(ALGO_TRACE_FW_EXEC,
2676 "[BTCoex], FW write 0x66 = 0x%x\n", 2662 "[BTCoex], FW write 0x66 = 0x%x\n",
2677 h2c_parameter[0]<<16|h2c_parameter[1]<<8|h2c_parameter[2]); 2663 h2c_parameter[0] << 16 |
2664 h2c_parameter[1] << 8 |
2665 h2c_parameter[2]);
2678 2666
2679 btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter); 2667 btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
2680} 2668}
@@ -2702,8 +2690,8 @@ void ex_halbtc8821a1ant_special_packet_notify(struct btc_coexist *btcoexist,
2702 2690
2703 if (BTC_PACKET_DHCP == type || 2691 if (BTC_PACKET_DHCP == type ||
2704 BTC_PACKET_EAPOL == type) { 2692 BTC_PACKET_EAPOL == type) {
2705 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 2693 btc_iface_dbg(INTF_NOTIFY,
2706 "[BTCoex], special Packet(%d) notify\n", type); 2694 "[BTCoex], special Packet(%d) notify\n", type);
2707 btc8821a1ant_act_wifi_conn_sp_pkt(btcoexist); 2695 btc8821a1ant_act_wifi_conn_sp_pkt(btcoexist);
2708 } 2696 }
2709} 2697}
@@ -2727,19 +2715,19 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
2727 rsp_source = BT_INFO_SRC_8821A_1ANT_WIFI_FW; 2715 rsp_source = BT_INFO_SRC_8821A_1ANT_WIFI_FW;
2728 coex_sta->bt_info_c2h_cnt[rsp_source]++; 2716 coex_sta->bt_info_c2h_cnt[rsp_source]++;
2729 2717
2730 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 2718 btc_iface_dbg(INTF_NOTIFY,
2731 "[BTCoex], Bt info[%d], length = %d, hex data = [", 2719 "[BTCoex], Bt info[%d], length = %d, hex data = [",
2732 rsp_source, length); 2720 rsp_source, length);
2733 for (i = 0; i < length; i++) { 2721 for (i = 0; i < length; i++) {
2734 coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i]; 2722 coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
2735 if (i == 1) 2723 if (i == 1)
2736 bt_info = tmp_buf[i]; 2724 bt_info = tmp_buf[i];
2737 if (i == length-1) { 2725 if (i == length-1) {
2738 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 2726 btc_iface_dbg(INTF_NOTIFY,
2739 "0x%02x]\n", tmp_buf[i]); 2727 "0x%02x]\n", tmp_buf[i]);
2740 } else { 2728 } else {
2741 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 2729 btc_iface_dbg(INTF_NOTIFY,
2742 "0x%02x, ", tmp_buf[i]); 2730 "0x%02x, ", tmp_buf[i]);
2743 } 2731 }
2744 } 2732 }
2745 2733
@@ -2756,8 +2744,8 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
2756 /* Here we need to resend some wifi info to BT*/ 2744 /* Here we need to resend some wifi info to BT*/
2757 /* because bt is reset and loss of the info.*/ 2745 /* because bt is reset and loss of the info.*/
2758 if (coex_sta->bt_info_ext & BIT1) { 2746 if (coex_sta->bt_info_ext & BIT1) {
2759 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2747 btc_alg_dbg(ALGO_TRACE,
2760 "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n"); 2748 "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
2761 btcoexist->btc_get(btcoexist, 2749 btcoexist->btc_get(btcoexist,
2762 BTC_GET_BL_WIFI_CONNECTED, 2750 BTC_GET_BL_WIFI_CONNECTED,
2763 &wifi_connected); 2751 &wifi_connected);
@@ -2773,8 +2761,8 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
2773 if ((coex_sta->bt_info_ext & BIT3) && !wifi_under_5g) { 2761 if ((coex_sta->bt_info_ext & BIT3) && !wifi_under_5g) {
2774 if (!btcoexist->manual_control && 2762 if (!btcoexist->manual_control &&
2775 !btcoexist->stop_coex_dm) { 2763 !btcoexist->stop_coex_dm) {
2776 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2764 btc_alg_dbg(ALGO_TRACE,
2777 "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n"); 2765 "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n");
2778 halbtc8821a1ant_ignore_wlan_act(btcoexist, 2766 halbtc8821a1ant_ignore_wlan_act(btcoexist,
2779 FORCE_EXEC, 2767 FORCE_EXEC,
2780 false); 2768 false);
@@ -2782,8 +2770,8 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
2782 } 2770 }
2783#if (BT_AUTO_REPORT_ONLY_8821A_1ANT == 0) 2771#if (BT_AUTO_REPORT_ONLY_8821A_1ANT == 0)
2784 if (!(coex_sta->bt_info_ext & BIT4)) { 2772 if (!(coex_sta->bt_info_ext & BIT4)) {
2785 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2773 btc_alg_dbg(ALGO_TRACE,
2786 "[BTCoex], BT ext info bit4 check, set BT to enable Auto Report!!\n"); 2774 "[BTCoex], BT ext info bit4 check, set BT to enable Auto Report!!\n");
2787 halbtc8821a1ant_bt_auto_report(btcoexist, 2775 halbtc8821a1ant_bt_auto_report(btcoexist,
2788 FORCE_EXEC, true); 2776 FORCE_EXEC, true);
2789 } 2777 }
@@ -2828,28 +2816,28 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
2828 2816
2829 if (!(bt_info&BT_INFO_8821A_1ANT_B_CONNECTION)) { 2817 if (!(bt_info&BT_INFO_8821A_1ANT_B_CONNECTION)) {
2830 coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE; 2818 coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE;
2831 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2819 btc_alg_dbg(ALGO_TRACE,
2832 "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n"); 2820 "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n");
2833 } else if (bt_info == BT_INFO_8821A_1ANT_B_CONNECTION) { 2821 } else if (bt_info == BT_INFO_8821A_1ANT_B_CONNECTION) {
2834 /* connection exists but no busy*/ 2822 /* connection exists but no busy*/
2835 coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE; 2823 coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE;
2836 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2824 btc_alg_dbg(ALGO_TRACE,
2837 "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n"); 2825 "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
2838 } else if ((bt_info&BT_INFO_8821A_1ANT_B_SCO_ESCO) || 2826 } else if ((bt_info&BT_INFO_8821A_1ANT_B_SCO_ESCO) ||
2839 (bt_info&BT_INFO_8821A_1ANT_B_SCO_BUSY)) { 2827 (bt_info&BT_INFO_8821A_1ANT_B_SCO_BUSY)) {
2840 coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_SCO_BUSY; 2828 coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_SCO_BUSY;
2841 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2829 btc_alg_dbg(ALGO_TRACE,
2842 "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n"); 2830 "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
2843 } else if (bt_info&BT_INFO_8821A_1ANT_B_ACL_BUSY) { 2831 } else if (bt_info&BT_INFO_8821A_1ANT_B_ACL_BUSY) {
2844 if (BT_8821A_1ANT_BT_STATUS_ACL_BUSY != coex_dm->bt_status) 2832 if (BT_8821A_1ANT_BT_STATUS_ACL_BUSY != coex_dm->bt_status)
2845 coex_dm->auto_tdma_adjust = false; 2833 coex_dm->auto_tdma_adjust = false;
2846 coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_ACL_BUSY; 2834 coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_ACL_BUSY;
2847 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2835 btc_alg_dbg(ALGO_TRACE,
2848 "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n"); 2836 "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
2849 } else { 2837 } else {
2850 coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_MAX; 2838 coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_MAX;
2851 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2839 btc_alg_dbg(ALGO_TRACE,
2852 "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n"); 2840 "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n");
2853 } 2841 }
2854 2842
2855 if ((BT_8821A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) || 2843 if ((BT_8821A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
@@ -2866,8 +2854,8 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
2866 2854
2867void ex_halbtc8821a1ant_halt_notify(struct btc_coexist *btcoexist) 2855void ex_halbtc8821a1ant_halt_notify(struct btc_coexist *btcoexist)
2868{ 2856{
2869 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 2857 btc_iface_dbg(INTF_NOTIFY,
2870 "[BTCoex], Halt notify\n"); 2858 "[BTCoex], Halt notify\n");
2871 2859
2872 btcoexist->stop_coex_dm = true; 2860 btcoexist->stop_coex_dm = true;
2873 2861
@@ -2885,20 +2873,20 @@ void ex_halbtc8821a1ant_halt_notify(struct btc_coexist *btcoexist)
2885 2873
2886void ex_halbtc8821a1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state) 2874void ex_halbtc8821a1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
2887{ 2875{
2888 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 2876 btc_iface_dbg(INTF_NOTIFY,
2889 "[BTCoex], Pnp notify\n"); 2877 "[BTCoex], Pnp notify\n");
2890 2878
2891 if (BTC_WIFI_PNP_SLEEP == pnp_state) { 2879 if (BTC_WIFI_PNP_SLEEP == pnp_state) {
2892 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 2880 btc_iface_dbg(INTF_NOTIFY,
2893 "[BTCoex], Pnp notify to SLEEP\n"); 2881 "[BTCoex], Pnp notify to SLEEP\n");
2894 btcoexist->stop_coex_dm = true; 2882 btcoexist->stop_coex_dm = true;
2895 halbtc8821a1ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true); 2883 halbtc8821a1ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
2896 halbtc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 2884 halbtc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
2897 0x0, 0x0); 2885 0x0, 0x0);
2898 halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 9); 2886 halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 9);
2899 } else if (BTC_WIFI_PNP_WAKE_UP == pnp_state) { 2887 } else if (BTC_WIFI_PNP_WAKE_UP == pnp_state) {
2900 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 2888 btc_iface_dbg(INTF_NOTIFY,
2901 "[BTCoex], Pnp notify to WAKE UP\n"); 2889 "[BTCoex], Pnp notify to WAKE UP\n");
2902 btcoexist->stop_coex_dm = false; 2890 btcoexist->stop_coex_dm = false;
2903 halbtc8821a1ant_init_hw_config(btcoexist, false); 2891 halbtc8821a1ant_init_hw_config(btcoexist, false);
2904 halbtc8821a1ant_init_coex_dm(btcoexist); 2892 halbtc8821a1ant_init_coex_dm(btcoexist);
@@ -2914,33 +2902,33 @@ ex_halbtc8821a1ant_periodical(
2914 struct btc_board_info *board_info = &btcoexist->board_info; 2902 struct btc_board_info *board_info = &btcoexist->board_info;
2915 struct btc_stack_info *stack_info = &btcoexist->stack_info; 2903 struct btc_stack_info *stack_info = &btcoexist->stack_info;
2916 2904
2917 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 2905 btc_alg_dbg(ALGO_TRACE,
2918 "[BTCoex], ==========================Periodical===========================\n"); 2906 "[BTCoex], ==========================Periodical===========================\n");
2919 2907
2920 if (dis_ver_info_cnt <= 5) { 2908 if (dis_ver_info_cnt <= 5) {
2921 dis_ver_info_cnt += 1; 2909 dis_ver_info_cnt += 1;
2922 BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, 2910 btc_iface_dbg(INTF_INIT,
2923 "[BTCoex], ****************************************************************\n"); 2911 "[BTCoex], ****************************************************************\n");
2924 BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, 2912 btc_iface_dbg(INTF_INIT,
2925 "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n", 2913 "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
2926 board_info->pg_ant_num, 2914 board_info->pg_ant_num,
2927 board_info->btdm_ant_num, 2915 board_info->btdm_ant_num,
2928 board_info->btdm_ant_pos); 2916 board_info->btdm_ant_pos);
2929 BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, 2917 btc_iface_dbg(INTF_INIT,
2930 "[BTCoex], BT stack/ hci ext ver = %s / %d\n", 2918 "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
2931 ((stack_info->profile_notified) ? "Yes" : "No"), 2919 stack_info->profile_notified ? "Yes" : "No",
2932 stack_info->hci_version); 2920 stack_info->hci_version);
2933 btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, 2921 btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
2934 &bt_patch_ver); 2922 &bt_patch_ver);
2935 btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); 2923 btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
2936 BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, 2924 btc_iface_dbg(INTF_INIT,
2937 "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n", 2925 "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
2938 glcoex_ver_date_8821a_1ant, 2926 glcoex_ver_date_8821a_1ant,
2939 glcoex_ver_8821a_1ant, 2927 glcoex_ver_8821a_1ant,
2940 fw_ver, bt_patch_ver, 2928 fw_ver, bt_patch_ver,
2941 bt_patch_ver); 2929 bt_patch_ver);
2942 BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, 2930 btc_iface_dbg(INTF_INIT,
2943 "[BTCoex], ****************************************************************\n"); 2931 "[BTCoex], ****************************************************************\n");
2944 } 2932 }
2945 2933
2946#if (BT_AUTO_REPORT_ONLY_8821A_1ANT == 0) 2934#if (BT_AUTO_REPORT_ONLY_8821A_1ANT == 0)
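
The hunks above replace every BTC_PRINT(BTC_MSG_ALGORITHM, ...) / BTC_PRINT(BTC_MSG_INTERFACE, ...) call with the shorter btc_alg_dbg() / btc_iface_dbg() wrappers, which bake the message component into the macro name and keep only the sub-flag, format string and arguments. A minimal user-space sketch of that pattern is shown below; the gating array, flag values and printf backend are assumptions for illustration only, while the real wrapper definitions live in the btcoexist headers and print through the kernel logging facilities.

    /* illustration_only.c - hypothetical model of the btc_*_dbg call shape */
    #include <stdio.h>

    /* assumed per-component debug masks (names/indexes are illustrative) */
    enum { BTC_MSG_ALGORITHM, BTC_MSG_INTERFACE, BTC_MSG_MAX };
    static unsigned int btc_dbg_type[BTC_MSG_MAX] = { ~0u, ~0u };

    #define ALGO_TRACE      (1 << 0)    /* illustrative flag values */
    #define INTF_NOTIFY     (1 << 1)

    /* each wrapper fixes the component; callers pass only the sub-flag */
    #define btc_alg_dbg(flag, fmt, ...)                              \
            do {                                                     \
                    if (btc_dbg_type[BTC_MSG_ALGORITHM] & (flag))    \
                            printf(fmt, ##__VA_ARGS__);              \
            } while (0)

    #define btc_iface_dbg(flag, fmt, ...)                            \
            do {                                                     \
                    if (btc_dbg_type[BTC_MSG_INTERFACE] & (flag))    \
                            printf(fmt, ##__VA_ARGS__);              \
            } while (0)

    int main(void)
    {
            /* same call shape as the converted driver code above */
            btc_alg_dbg(ALGO_TRACE, "[BTCoex], Action algorithm = SCO\n");
            btc_iface_dbg(INTF_NOTIFY, "[BTCoex], IPS ENTER notify\n");
            return 0;
    }
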
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
index 044d914291c0..81f843bba771 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
@@ -80,28 +80,28 @@ static u8 halbtc8821a2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
80 BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT; 80 BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT;
81 if (bt_rssi >= tmp) { 81 if (bt_rssi >= tmp) {
82 bt_rssi_state = BTC_RSSI_STATE_HIGH; 82 bt_rssi_state = BTC_RSSI_STATE_HIGH;
83 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 83 btc_alg_dbg(ALGO_BT_RSSI_STATE,
84 "[BTCoex], BT Rssi state switch to High\n"); 84 "[BTCoex], BT Rssi state switch to High\n");
85 } else { 85 } else {
86 bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; 86 bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
87 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 87 btc_alg_dbg(ALGO_BT_RSSI_STATE,
88 "[BTCoex], BT Rssi state stay at Low\n"); 88 "[BTCoex], BT Rssi state stay at Low\n");
89 } 89 }
90 } else { 90 } else {
91 if (bt_rssi < rssi_thresh) { 91 if (bt_rssi < rssi_thresh) {
92 bt_rssi_state = BTC_RSSI_STATE_LOW; 92 bt_rssi_state = BTC_RSSI_STATE_LOW;
93 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 93 btc_alg_dbg(ALGO_BT_RSSI_STATE,
94 "[BTCoex], BT Rssi state switch to Low\n"); 94 "[BTCoex], BT Rssi state switch to Low\n");
95 } else { 95 } else {
96 bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH; 96 bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
97 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 97 btc_alg_dbg(ALGO_BT_RSSI_STATE,
98 "[BTCoex], BT Rssi state stay at High\n"); 98 "[BTCoex], BT Rssi state stay at High\n");
99 } 99 }
100 } 100 }
101 } else if (level_num == 3) { 101 } else if (level_num == 3) {
102 if (rssi_thresh > rssi_thresh1) { 102 if (rssi_thresh > rssi_thresh1) {
103 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 103 btc_alg_dbg(ALGO_BT_RSSI_STATE,
104 "[BTCoex], BT Rssi thresh error!!\n"); 104 "[BTCoex], BT Rssi thresh error!!\n");
105 return coex_sta->pre_bt_rssi_state; 105 return coex_sta->pre_bt_rssi_state;
106 } 106 }
107 107
@@ -110,12 +110,12 @@ static u8 halbtc8821a2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
110 if (bt_rssi >= 110 if (bt_rssi >=
111 (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) { 111 (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
112 bt_rssi_state = BTC_RSSI_STATE_MEDIUM; 112 bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
113 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 113 btc_alg_dbg(ALGO_BT_RSSI_STATE,
114 "[BTCoex], BT Rssi state switch to Medium\n"); 114 "[BTCoex], BT Rssi state switch to Medium\n");
115 } else { 115 } else {
116 bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; 116 bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
117 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 117 btc_alg_dbg(ALGO_BT_RSSI_STATE,
118 "[BTCoex], BT Rssi state stay at Low\n"); 118 "[BTCoex], BT Rssi state stay at Low\n");
119 } 119 }
120 } else if ((coex_sta->pre_bt_rssi_state == 120 } else if ((coex_sta->pre_bt_rssi_state ==
121 BTC_RSSI_STATE_MEDIUM) || 121 BTC_RSSI_STATE_MEDIUM) ||
@@ -125,26 +125,26 @@ static u8 halbtc8821a2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
125 (rssi_thresh1 + 125 (rssi_thresh1 +
126 BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) { 126 BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
127 bt_rssi_state = BTC_RSSI_STATE_HIGH; 127 bt_rssi_state = BTC_RSSI_STATE_HIGH;
128 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 128 btc_alg_dbg(ALGO_BT_RSSI_STATE,
129 "[BTCoex], BT Rssi state switch to High\n"); 129 "[BTCoex], BT Rssi state switch to High\n");
130 } else if (bt_rssi < rssi_thresh) { 130 } else if (bt_rssi < rssi_thresh) {
131 bt_rssi_state = BTC_RSSI_STATE_LOW; 131 bt_rssi_state = BTC_RSSI_STATE_LOW;
132 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 132 btc_alg_dbg(ALGO_BT_RSSI_STATE,
133 "[BTCoex], BT Rssi state switch to Low\n"); 133 "[BTCoex], BT Rssi state switch to Low\n");
134 } else { 134 } else {
135 bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM; 135 bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
136 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 136 btc_alg_dbg(ALGO_BT_RSSI_STATE,
137 "[BTCoex], BT Rssi state stay at Medium\n"); 137 "[BTCoex], BT Rssi state stay at Medium\n");
138 } 138 }
139 } else { 139 } else {
140 if (bt_rssi < rssi_thresh1) { 140 if (bt_rssi < rssi_thresh1) {
141 bt_rssi_state = BTC_RSSI_STATE_MEDIUM; 141 bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
142 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 142 btc_alg_dbg(ALGO_BT_RSSI_STATE,
143 "[BTCoex], BT Rssi state switch to Medium\n"); 143 "[BTCoex], BT Rssi state switch to Medium\n");
144 } else { 144 } else {
145 bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH; 145 bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
146 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 146 btc_alg_dbg(ALGO_BT_RSSI_STATE,
147 "[BTCoex], BT Rssi state stay at High\n"); 147 "[BTCoex], BT Rssi state stay at High\n");
148 } 148 }
149 } 149 }
150 } 150 }
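
The conversion in these hunks is mechanical: every BTC_PRINT(BTC_MSG_ALGORITHM, <flag>, fmt, ...) call collapses into btc_alg_dbg(<flag>, fmt, ...), with the BTC_MSG_ALGORITHM message class becoming implicit. Below is a minimal, self-contained sketch of a wrapper with that shape; the stand-in definitions are assumptions for illustration only, and the real btc_alg_dbg definition lives in the btcoexist headers and may differ.

#include <stdio.h>

/* Stand-ins so the sketch compiles on its own; not the in-tree definitions. */
#define BTC_MSG_ALGORITHM	0
#define ALGO_BT_RSSI_STATE	1
#define BTC_PRINT(msg, flag, fmt, ...)	printf(fmt, ##__VA_ARGS__)

/* Shape implied by the diff: the ALGORITHM message class is now implicit. */
#define btc_alg_dbg(dbg_flag, fmt, ...)				\
	BTC_PRINT(BTC_MSG_ALGORITHM, dbg_flag, fmt, ##__VA_ARGS__)

int main(void)
{
	btc_alg_dbg(ALGO_BT_RSSI_STATE,
		    "[BTCoex], BT Rssi state switch to Low\n");
	return 0;
}
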
@@ -171,32 +171,28 @@ static u8 halbtc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
171 if (wifi_rssi >= 171 if (wifi_rssi >=
172 (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) { 172 (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
173 wifi_rssi_state = BTC_RSSI_STATE_HIGH; 173 wifi_rssi_state = BTC_RSSI_STATE_HIGH;
174 BTC_PRINT(BTC_MSG_ALGORITHM, 174 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
175 ALGO_WIFI_RSSI_STATE, 175 "[BTCoex], wifi RSSI state switch to High\n");
176 "[BTCoex], wifi RSSI state switch to High\n");
177 } else { 176 } else {
178 wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW; 177 wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
179 BTC_PRINT(BTC_MSG_ALGORITHM, 178 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
180 ALGO_WIFI_RSSI_STATE, 179 "[BTCoex], wifi RSSI state stay at Low\n");
181 "[BTCoex], wifi RSSI state stay at Low\n");
182 } 180 }
183 } else { 181 } else {
184 if (wifi_rssi < rssi_thresh) { 182 if (wifi_rssi < rssi_thresh) {
185 wifi_rssi_state = BTC_RSSI_STATE_LOW; 183 wifi_rssi_state = BTC_RSSI_STATE_LOW;
186 BTC_PRINT(BTC_MSG_ALGORITHM, 184 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
187 ALGO_WIFI_RSSI_STATE, 185 "[BTCoex], wifi RSSI state switch to Low\n");
188 "[BTCoex], wifi RSSI state switch to Low\n");
189 } else { 186 } else {
190 wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH; 187 wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
191 BTC_PRINT(BTC_MSG_ALGORITHM, 188 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
192 ALGO_WIFI_RSSI_STATE, 189 "[BTCoex], wifi RSSI state stay at High\n");
193 "[BTCoex], wifi RSSI state stay at High\n");
194 } 190 }
195 } 191 }
196 } else if (level_num == 3) { 192 } else if (level_num == 3) {
197 if (rssi_thresh > rssi_thresh1) { 193 if (rssi_thresh > rssi_thresh1) {
198 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, 194 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
199 "[BTCoex], wifi RSSI thresh error!!\n"); 195 "[BTCoex], wifi RSSI thresh error!!\n");
200 return coex_sta->pre_wifi_rssi_state[index]; 196 return coex_sta->pre_wifi_rssi_state[index];
201 } 197 }
202 198
@@ -207,14 +203,12 @@ static u8 halbtc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
207 if (wifi_rssi >= 203 if (wifi_rssi >=
208 (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) { 204 (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
209 wifi_rssi_state = BTC_RSSI_STATE_MEDIUM; 205 wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
210 BTC_PRINT(BTC_MSG_ALGORITHM, 206 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
211 ALGO_WIFI_RSSI_STATE, 207 "[BTCoex], wifi RSSI state switch to Medium\n");
212 "[BTCoex], wifi RSSI state switch to Medium\n");
213 } else { 208 } else {
214 wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW; 209 wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
215 BTC_PRINT(BTC_MSG_ALGORITHM, 210 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
216 ALGO_WIFI_RSSI_STATE, 211 "[BTCoex], wifi RSSI state stay at Low\n");
217 "[BTCoex], wifi RSSI state stay at Low\n");
218 } 212 }
219 } else if ((coex_sta->pre_wifi_rssi_state[index] == 213 } else if ((coex_sta->pre_wifi_rssi_state[index] ==
220 BTC_RSSI_STATE_MEDIUM) || 214 BTC_RSSI_STATE_MEDIUM) ||
@@ -223,31 +217,26 @@ static u8 halbtc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
223 if (wifi_rssi >= (rssi_thresh1 + 217 if (wifi_rssi >= (rssi_thresh1 +
224 BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) { 218 BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
225 wifi_rssi_state = BTC_RSSI_STATE_HIGH; 219 wifi_rssi_state = BTC_RSSI_STATE_HIGH;
226 BTC_PRINT(BTC_MSG_ALGORITHM, 220 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
227 ALGO_WIFI_RSSI_STATE, 221 "[BTCoex], wifi RSSI state switch to High\n");
228 "[BTCoex], wifi RSSI state switch to High\n");
229 } else if (wifi_rssi < rssi_thresh) { 222 } else if (wifi_rssi < rssi_thresh) {
230 wifi_rssi_state = BTC_RSSI_STATE_LOW; 223 wifi_rssi_state = BTC_RSSI_STATE_LOW;
231 BTC_PRINT(BTC_MSG_ALGORITHM, 224 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
232 ALGO_WIFI_RSSI_STATE, 225 "[BTCoex], wifi RSSI state switch to Low\n");
233 "[BTCoex], wifi RSSI state switch to Low\n");
234 } else { 226 } else {
235 wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM; 227 wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
236 BTC_PRINT(BTC_MSG_ALGORITHM, 228 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
237 ALGO_WIFI_RSSI_STATE, 229 "[BTCoex], wifi RSSI state stay at Medium\n");
238 "[BTCoex], wifi RSSI state stay at Medium\n");
239 } 230 }
240 } else { 231 } else {
241 if (wifi_rssi < rssi_thresh1) { 232 if (wifi_rssi < rssi_thresh1) {
242 wifi_rssi_state = BTC_RSSI_STATE_MEDIUM; 233 wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
243 BTC_PRINT(BTC_MSG_ALGORITHM, 234 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
244 ALGO_WIFI_RSSI_STATE, 235 "[BTCoex], wifi RSSI state switch to Medium\n");
245 "[BTCoex], wifi RSSI state switch to Medium\n");
246 } else { 236 } else {
247 wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH; 237 wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
248 BTC_PRINT(BTC_MSG_ALGORITHM, 238 btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
249 ALGO_WIFI_RSSI_STATE, 239 "[BTCoex], wifi RSSI state stay at High\n");
250 "[BTCoex], wifi RSSI state stay at High\n");
251 } 240 }
252 } 241 }
253 } 242 }
@@ -279,26 +268,26 @@ static void btc8821a2ant_mon_bt_en_dis(struct btc_coexist *btcoexist)
279 bt_disabled = false; 268 bt_disabled = false;
280 btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, 269 btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
281 &bt_disabled); 270 &bt_disabled);
282 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, 271 btc_alg_dbg(ALGO_BT_MONITOR,
283 "[BTCoex], BT is enabled !!\n"); 272 "[BTCoex], BT is enabled !!\n");
284 } else { 273 } else {
285 bt_disable_cnt++; 274 bt_disable_cnt++;
286 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, 275 btc_alg_dbg(ALGO_BT_MONITOR,
287 "[BTCoex], bt all counters = 0, %d times!!\n", 276 "[BTCoex], bt all counters = 0, %d times!!\n",
288 bt_disable_cnt); 277 bt_disable_cnt);
289 if (bt_disable_cnt >= 2) { 278 if (bt_disable_cnt >= 2) {
290 bt_disabled = true; 279 bt_disabled = true;
291 btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, 280 btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
292 &bt_disabled); 281 &bt_disabled);
293 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, 282 btc_alg_dbg(ALGO_BT_MONITOR,
294 "[BTCoex], BT is disabled !!\n"); 283 "[BTCoex], BT is disabled !!\n");
295 } 284 }
296 } 285 }
297 if (pre_bt_disabled != bt_disabled) { 286 if (pre_bt_disabled != bt_disabled) {
298 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, 287 btc_alg_dbg(ALGO_BT_MONITOR,
299 "[BTCoex], BT is from %s to %s!!\n", 288 "[BTCoex], BT is from %s to %s!!\n",
300 (pre_bt_disabled ? "disabled" : "enabled"), 289 (pre_bt_disabled ? "disabled" : "enabled"),
301 (bt_disabled ? "disabled" : "enabled")); 290 (bt_disabled ? "disabled" : "enabled"));
302 pre_bt_disabled = bt_disabled; 291 pre_bt_disabled = bt_disabled;
303 } 292 }
304} 293}
@@ -324,12 +313,12 @@ static void halbtc8821a2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
324 coex_sta->low_priority_tx = reg_lp_tx; 313 coex_sta->low_priority_tx = reg_lp_tx;
325 coex_sta->low_priority_rx = reg_lp_rx; 314 coex_sta->low_priority_rx = reg_lp_rx;
326 315
327 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, 316 btc_alg_dbg(ALGO_BT_MONITOR,
328 "[BTCoex], High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n", 317 "[BTCoex], High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
329 reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx); 318 reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
330 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, 319 btc_alg_dbg(ALGO_BT_MONITOR,
331 "[BTCoex], Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n", 320 "[BTCoex], Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
332 reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx); 321 reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
333 322
334 /* reset counter */ 323 /* reset counter */
335 btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc); 324 btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
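
The seemingly duplicated arguments in the two trace calls above (reg_hp_tx, reg_hp_tx and so on) are intentional: the "0x%x(%d)" format prints each counter twice, once in hex and once in decimal. A tiny standalone illustration, with an example value only:

#include <stdio.h>

int main(void)
{
	unsigned int reg_hp_tx = 0x2a;	/* example value only */

	/* the same value is consumed twice: "0x%x(%d)" -> "0x2a(42)" */
	printf("High Priority Tx = 0x%x(%d)\n", reg_hp_tx, (int)reg_hp_tx);
	return 0;
}
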
@@ -343,9 +332,9 @@ static void halbtc8821a2ant_query_bt_info(struct btc_coexist *btcoexist)
343 332
344 h2c_parameter[0] |= BIT0; /* trigger */ 333 h2c_parameter[0] |= BIT0; /* trigger */
345 334
346 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, 335 btc_alg_dbg(ALGO_TRACE_FW_EXEC,
347 "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n", 336 "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
348 h2c_parameter[0]); 337 h2c_parameter[0]);
349 338
350 btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter); 339 btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
351} 340}
@@ -368,8 +357,8 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
368 stack_info->bt_link_exist = coex_sta->bt_link_exist; 357 stack_info->bt_link_exist = coex_sta->bt_link_exist;
369 358
370 if (!coex_sta->bt_link_exist) { 359 if (!coex_sta->bt_link_exist) {
371 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 360 btc_alg_dbg(ALGO_TRACE,
372 "[BTCoex], No profile exists!!!\n"); 361 "[BTCoex], No profile exists!!!\n");
373 return algorithm; 362 return algorithm;
374 } 363 }
375 364
@@ -384,26 +373,26 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
384 373
385 if (num_of_diff_profile == 1) { 374 if (num_of_diff_profile == 1) {
386 if (coex_sta->sco_exist) { 375 if (coex_sta->sco_exist) {
387 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 376 btc_alg_dbg(ALGO_TRACE,
388 "[BTCoex], SCO only\n"); 377 "[BTCoex], SCO only\n");
389 algorithm = BT_8821A_2ANT_COEX_ALGO_SCO; 378 algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
390 } else { 379 } else {
391 if (coex_sta->hid_exist) { 380 if (coex_sta->hid_exist) {
392 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 381 btc_alg_dbg(ALGO_TRACE,
393 "[BTCoex], HID only\n"); 382 "[BTCoex], HID only\n");
394 algorithm = BT_8821A_2ANT_COEX_ALGO_HID; 383 algorithm = BT_8821A_2ANT_COEX_ALGO_HID;
395 } else if (coex_sta->a2dp_exist) { 384 } else if (coex_sta->a2dp_exist) {
396 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 385 btc_alg_dbg(ALGO_TRACE,
397 "[BTCoex], A2DP only\n"); 386 "[BTCoex], A2DP only\n");
398 algorithm = BT_8821A_2ANT_COEX_ALGO_A2DP; 387 algorithm = BT_8821A_2ANT_COEX_ALGO_A2DP;
399 } else if (coex_sta->pan_exist) { 388 } else if (coex_sta->pan_exist) {
400 if (bt_hs_on) { 389 if (bt_hs_on) {
401 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 390 btc_alg_dbg(ALGO_TRACE,
402 "[BTCoex], PAN(HS) only\n"); 391 "[BTCoex], PAN(HS) only\n");
403 algorithm = BT_8821A_2ANT_COEX_ALGO_PANHS; 392 algorithm = BT_8821A_2ANT_COEX_ALGO_PANHS;
404 } else { 393 } else {
405 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 394 btc_alg_dbg(ALGO_TRACE,
406 "[BTCoex], PAN(EDR) only\n"); 395 "[BTCoex], PAN(EDR) only\n");
407 algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR; 396 algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR;
408 } 397 }
409 } 398 }
@@ -411,50 +400,50 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
411 } else if (num_of_diff_profile == 2) { 400 } else if (num_of_diff_profile == 2) {
412 if (coex_sta->sco_exist) { 401 if (coex_sta->sco_exist) {
413 if (coex_sta->hid_exist) { 402 if (coex_sta->hid_exist) {
414 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 403 btc_alg_dbg(ALGO_TRACE,
415 "[BTCoex], SCO + HID\n"); 404 "[BTCoex], SCO + HID\n");
416 algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; 405 algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
417 } else if (coex_sta->a2dp_exist) { 406 } else if (coex_sta->a2dp_exist) {
418 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 407 btc_alg_dbg(ALGO_TRACE,
419 "[BTCoex], SCO + A2DP ==> SCO\n"); 408 "[BTCoex], SCO + A2DP ==> SCO\n");
420 algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; 409 algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
421 } else if (coex_sta->pan_exist) { 410 } else if (coex_sta->pan_exist) {
422 if (bt_hs_on) { 411 if (bt_hs_on) {
423 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 412 btc_alg_dbg(ALGO_TRACE,
424 "[BTCoex], SCO + PAN(HS)\n"); 413 "[BTCoex], SCO + PAN(HS)\n");
425 algorithm = BT_8821A_2ANT_COEX_ALGO_SCO; 414 algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
426 } else { 415 } else {
427 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 416 btc_alg_dbg(ALGO_TRACE,
428 "[BTCoex], SCO + PAN(EDR)\n"); 417 "[BTCoex], SCO + PAN(EDR)\n");
429 algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; 418 algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
430 } 419 }
431 } 420 }
432 } else { 421 } else {
433 if (coex_sta->hid_exist && 422 if (coex_sta->hid_exist &&
434 coex_sta->a2dp_exist) { 423 coex_sta->a2dp_exist) {
435 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 424 btc_alg_dbg(ALGO_TRACE,
436 "[BTCoex], HID + A2DP\n"); 425 "[BTCoex], HID + A2DP\n");
437 algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP; 426 algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP;
438 } else if (coex_sta->hid_exist && 427 } else if (coex_sta->hid_exist &&
439 coex_sta->pan_exist) { 428 coex_sta->pan_exist) {
440 if (bt_hs_on) { 429 if (bt_hs_on) {
441 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 430 btc_alg_dbg(ALGO_TRACE,
442 "[BTCoex], HID + PAN(HS)\n"); 431 "[BTCoex], HID + PAN(HS)\n");
443 algorithm = BT_8821A_2ANT_COEX_ALGO_HID; 432 algorithm = BT_8821A_2ANT_COEX_ALGO_HID;
444 } else { 433 } else {
445 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 434 btc_alg_dbg(ALGO_TRACE,
446 "[BTCoex], HID + PAN(EDR)\n"); 435 "[BTCoex], HID + PAN(EDR)\n");
447 algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; 436 algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
448 } 437 }
449 } else if (coex_sta->pan_exist && 438 } else if (coex_sta->pan_exist &&
450 coex_sta->a2dp_exist) { 439 coex_sta->a2dp_exist) {
451 if (bt_hs_on) { 440 if (bt_hs_on) {
452 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 441 btc_alg_dbg(ALGO_TRACE,
453 "[BTCoex], A2DP + PAN(HS)\n"); 442 "[BTCoex], A2DP + PAN(HS)\n");
454 algorithm = BT_8821A_2ANT_COEX_ALGO_A2DP_PANHS; 443 algorithm = BT_8821A_2ANT_COEX_ALGO_A2DP_PANHS;
455 } else { 444 } else {
456 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 445 btc_alg_dbg(ALGO_TRACE,
457 "[BTCoex], A2DP + PAN(EDR)\n"); 446 "[BTCoex], A2DP + PAN(EDR)\n");
458 algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_A2DP; 447 algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_A2DP;
459 } 448 }
460 } 449 }
@@ -463,29 +452,29 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
463 if (coex_sta->sco_exist) { 452 if (coex_sta->sco_exist) {
464 if (coex_sta->hid_exist && 453 if (coex_sta->hid_exist &&
465 coex_sta->a2dp_exist) { 454 coex_sta->a2dp_exist) {
466 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 455 btc_alg_dbg(ALGO_TRACE,
467 "[BTCoex], SCO + HID + A2DP ==> HID\n"); 456 "[BTCoex], SCO + HID + A2DP ==> HID\n");
468 algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; 457 algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
469 } else if (coex_sta->hid_exist && 458 } else if (coex_sta->hid_exist &&
470 coex_sta->pan_exist) { 459 coex_sta->pan_exist) {
471 if (bt_hs_on) { 460 if (bt_hs_on) {
472 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 461 btc_alg_dbg(ALGO_TRACE,
473 "[BTCoex], SCO + HID + PAN(HS)\n"); 462 "[BTCoex], SCO + HID + PAN(HS)\n");
474 algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; 463 algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
475 } else { 464 } else {
476 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 465 btc_alg_dbg(ALGO_TRACE,
477 "[BTCoex], SCO + HID + PAN(EDR)\n"); 466 "[BTCoex], SCO + HID + PAN(EDR)\n");
478 algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; 467 algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
479 } 468 }
480 } else if (coex_sta->pan_exist && 469 } else if (coex_sta->pan_exist &&
481 coex_sta->a2dp_exist) { 470 coex_sta->a2dp_exist) {
482 if (bt_hs_on) { 471 if (bt_hs_on) {
483 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 472 btc_alg_dbg(ALGO_TRACE,
484 "[BTCoex], SCO + A2DP + PAN(HS)\n"); 473 "[BTCoex], SCO + A2DP + PAN(HS)\n");
485 algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; 474 algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
486 } else { 475 } else {
487 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 476 btc_alg_dbg(ALGO_TRACE,
488 "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n"); 477 "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n");
489 algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; 478 algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
490 } 479 }
491 } 480 }
@@ -494,12 +483,12 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
494 coex_sta->pan_exist && 483 coex_sta->pan_exist &&
495 coex_sta->a2dp_exist) { 484 coex_sta->a2dp_exist) {
496 if (bt_hs_on) { 485 if (bt_hs_on) {
497 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 486 btc_alg_dbg(ALGO_TRACE,
498 "[BTCoex], HID + A2DP + PAN(HS)\n"); 487 "[BTCoex], HID + A2DP + PAN(HS)\n");
499 algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP; 488 algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP;
500 } else { 489 } else {
501 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 490 btc_alg_dbg(ALGO_TRACE,
502 "[BTCoex], HID + A2DP + PAN(EDR)\n"); 491 "[BTCoex], HID + A2DP + PAN(EDR)\n");
503 algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP_PANEDR; 492 algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
504 } 493 }
505 } 494 }
@@ -510,12 +499,12 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
510 coex_sta->pan_exist && 499 coex_sta->pan_exist &&
511 coex_sta->a2dp_exist) { 500 coex_sta->a2dp_exist) {
512 if (bt_hs_on) { 501 if (bt_hs_on) {
513 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 502 btc_alg_dbg(ALGO_TRACE,
514 "[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n"); 503 "[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n");
515 504
516 } else { 505 } else {
517 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 506 btc_alg_dbg(ALGO_TRACE,
518 "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n"); 507 "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
519 algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; 508 algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
520 } 509 }
521 } 510 }
@@ -544,15 +533,15 @@ static bool halbtc8821a2ant_need_to_dec_bt_pwr(struct btc_coexist *btcoexist)
544 if (wifi_connected) { 533 if (wifi_connected) {
545 if (bt_hs_on) { 534 if (bt_hs_on) {
546 if (bt_hs_rssi > 37) { 535 if (bt_hs_rssi > 37) {
547 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, 536 btc_alg_dbg(ALGO_TRACE_FW,
548 "[BTCoex], Need to decrease bt power for HS mode!!\n"); 537 "[BTCoex], Need to decrease bt power for HS mode!!\n");
549 ret = true; 538 ret = true;
550 } 539 }
551 } else { 540 } else {
552 if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || 541 if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
553 (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { 542 (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
554 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, 543 btc_alg_dbg(ALGO_TRACE_FW,
555 "[BTCoex], Need to decrease bt power for Wifi is connected!!\n"); 544 "[BTCoex], Need to decrease bt power for Wifi is connected!!\n");
556 ret = true; 545 ret = true;
557 } 546 }
558 } 547 }
@@ -570,10 +559,10 @@ static void btc8821a2ant_set_fw_dac_swing_lev(struct btc_coexist *btcoexist,
570 */ 559 */
571 h2c_parameter[0] = dac_swing_lvl; 560 h2c_parameter[0] = dac_swing_lvl;
572 561
573 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, 562 btc_alg_dbg(ALGO_TRACE_FW_EXEC,
574 "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swing_lvl); 563 "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swing_lvl);
575 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, 564 btc_alg_dbg(ALGO_TRACE_FW_EXEC,
576 "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]); 565 "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]);
577 566
578 btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter); 567 btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
579} 568}
@@ -588,9 +577,9 @@ static void halbtc8821a2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
588 if (dec_bt_pwr) 577 if (dec_bt_pwr)
589 h2c_parameter[0] |= BIT1; 578 h2c_parameter[0] |= BIT1;
590 579
591 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, 580 btc_alg_dbg(ALGO_TRACE_FW_EXEC,
592 "[BTCoex], decrease Bt Power : %s, FW write 0x62 = 0x%x\n", 581 "[BTCoex], decrease Bt Power : %s, FW write 0x62 = 0x%x\n",
593 (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]); 582 (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
594 583
595 btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter); 584 btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
596} 585}
@@ -598,16 +587,16 @@ static void halbtc8821a2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
598static void halbtc8821a2ant_dec_bt_pwr(struct btc_coexist *btcoexist, 587static void halbtc8821a2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
599 bool force_exec, bool dec_bt_pwr) 588 bool force_exec, bool dec_bt_pwr)
600{ 589{
601 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, 590 btc_alg_dbg(ALGO_TRACE_FW,
602 "[BTCoex], %s Dec BT power = %s\n", 591 "[BTCoex], %s Dec BT power = %s\n",
603 (force_exec ? "force to" : ""), 592 (force_exec ? "force to" : ""),
604 ((dec_bt_pwr) ? "ON" : "OFF")); 593 ((dec_bt_pwr) ? "ON" : "OFF"));
605 coex_dm->cur_dec_bt_pwr = dec_bt_pwr; 594 coex_dm->cur_dec_bt_pwr = dec_bt_pwr;
606 595
607 if (!force_exec) { 596 if (!force_exec) {
608 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 597 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
609 "[BTCoex], pre_dec_bt_pwr = %d, cur_dec_bt_pwr = %d\n", 598 "[BTCoex], pre_dec_bt_pwr = %d, cur_dec_bt_pwr = %d\n",
610 coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr); 599 coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
611 600
612 if (coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr) 601 if (coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr)
613 return; 602 return;
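
halbtc8821a2ant_dec_bt_pwr above shows the caching idiom repeated by most of the setters that follow: the requested value is stored as cur_*, and unless force_exec is set, the firmware/hardware write is skipped whenever the cached pre_* value already matches. A simplified standalone sketch of that shape follows; the real setters log through btc_alg_dbg, keep the values in coex_dm, and issue the H2C write instead of printf.

#include <stdbool.h>
#include <stdio.h>

static bool pre_dec_bt_pwr;
static bool cur_dec_bt_pwr;

static void dec_bt_pwr_sketch(bool force_exec, bool dec_bt_pwr)
{
	cur_dec_bt_pwr = dec_bt_pwr;

	/* NORMAL_EXEC path: skip the write when nothing changed */
	if (!force_exec && pre_dec_bt_pwr == cur_dec_bt_pwr)
		return;

	printf("write 0x62, dec BT power = %s\n",
	       cur_dec_bt_pwr ? "ON" : "OFF");	/* stands in for the FW write */
	pre_dec_bt_pwr = cur_dec_bt_pwr;
}

int main(void)
{
	dec_bt_pwr_sketch(false, true);		/* writes: value changed */
	dec_bt_pwr_sketch(false, true);		/* skipped: unchanged */
	dec_bt_pwr_sketch(true, true);		/* writes: forced */
	return 0;
}
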
@@ -627,10 +616,10 @@ static void btc8821a2ant_set_fw_bt_lna_constr(struct btc_coexist *btcoexist,
627 if (bt_lna_cons_on) 616 if (bt_lna_cons_on)
628 h2c_parameter[1] |= BIT0; 617 h2c_parameter[1] |= BIT0;
629 618
630 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, 619 btc_alg_dbg(ALGO_TRACE_FW_EXEC,
631 "[BTCoex], set BT LNA Constrain: %s, FW write 0x69 = 0x%x\n", 620 "[BTCoex], set BT LNA Constrain: %s, FW write 0x69 = 0x%x\n",
632 (bt_lna_cons_on ? "ON!!" : "OFF!!"), 621 bt_lna_cons_on ? "ON!!" : "OFF!!",
633 h2c_parameter[0]<<8|h2c_parameter[1]); 622 h2c_parameter[0] << 8 | h2c_parameter[1]);
634 623
635 btcoexist->btc_fill_h2c(btcoexist, 0x69, 2, h2c_parameter); 624 btcoexist->btc_fill_h2c(btcoexist, 0x69, 2, h2c_parameter);
636} 625}
@@ -638,17 +627,17 @@ static void btc8821a2ant_set_fw_bt_lna_constr(struct btc_coexist *btcoexist,
638static void btc8821a2_set_bt_lna_const(struct btc_coexist *btcoexist, 627static void btc8821a2_set_bt_lna_const(struct btc_coexist *btcoexist,
639 bool force_exec, bool bt_lna_cons_on) 628 bool force_exec, bool bt_lna_cons_on)
640{ 629{
641 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, 630 btc_alg_dbg(ALGO_TRACE_FW,
642 "[BTCoex], %s BT Constrain = %s\n", 631 "[BTCoex], %s BT Constrain = %s\n",
643 (force_exec ? "force" : ""), 632 (force_exec ? "force" : ""),
644 ((bt_lna_cons_on) ? "ON" : "OFF")); 633 ((bt_lna_cons_on) ? "ON" : "OFF"));
645 coex_dm->cur_bt_lna_constrain = bt_lna_cons_on; 634 coex_dm->cur_bt_lna_constrain = bt_lna_cons_on;
646 635
647 if (!force_exec) { 636 if (!force_exec) {
648 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 637 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
649 "[BTCoex], pre_bt_lna_constrain = %d,cur_bt_lna_constrain = %d\n", 638 "[BTCoex], pre_bt_lna_constrain = %d,cur_bt_lna_constrain = %d\n",
650 coex_dm->pre_bt_lna_constrain, 639 coex_dm->pre_bt_lna_constrain,
651 coex_dm->cur_bt_lna_constrain); 640 coex_dm->cur_bt_lna_constrain);
652 641
653 if (coex_dm->pre_bt_lna_constrain == 642 if (coex_dm->pre_bt_lna_constrain ==
654 coex_dm->cur_bt_lna_constrain) 643 coex_dm->cur_bt_lna_constrain)
@@ -669,10 +658,10 @@ static void halbtc8821a2ant_set_fw_bt_psd_mode(struct btc_coexist *btcoexist,
669 658
670 h2c_parameter[1] = bt_psd_mode; 659 h2c_parameter[1] = bt_psd_mode;
671 660
672 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, 661 btc_alg_dbg(ALGO_TRACE_FW_EXEC,
673 "[BTCoex], set BT PSD mode = 0x%x, FW write 0x69 = 0x%x\n", 662 "[BTCoex], set BT PSD mode = 0x%x, FW write 0x69 = 0x%x\n",
674 h2c_parameter[1], 663 h2c_parameter[1],
675 h2c_parameter[0]<<8|h2c_parameter[1]); 664 h2c_parameter[0] << 8 | h2c_parameter[1]);
676 665
677 btcoexist->btc_fill_h2c(btcoexist, 0x69, 2, h2c_parameter); 666 btcoexist->btc_fill_h2c(btcoexist, 0x69, 2, h2c_parameter);
678} 667}
@@ -680,15 +669,15 @@ static void halbtc8821a2ant_set_fw_bt_psd_mode(struct btc_coexist *btcoexist,
680static void halbtc8821a2ant_set_bt_psd_mode(struct btc_coexist *btcoexist, 669static void halbtc8821a2ant_set_bt_psd_mode(struct btc_coexist *btcoexist,
681 bool force_exec, u8 bt_psd_mode) 670 bool force_exec, u8 bt_psd_mode)
682{ 671{
683 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, 672 btc_alg_dbg(ALGO_TRACE_FW,
684 "[BTCoex], %s BT PSD mode = 0x%x\n", 673 "[BTCoex], %s BT PSD mode = 0x%x\n",
685 (force_exec ? "force" : ""), bt_psd_mode); 674 (force_exec ? "force" : ""), bt_psd_mode);
686 coex_dm->cur_bt_psd_mode = bt_psd_mode; 675 coex_dm->cur_bt_psd_mode = bt_psd_mode;
687 676
688 if (!force_exec) { 677 if (!force_exec) {
689 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 678 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
690 "[BTCoex], pre_bt_psd_mode = 0x%x, cur_bt_psd_mode = 0x%x\n", 679 "[BTCoex], pre_bt_psd_mode = 0x%x, cur_bt_psd_mode = 0x%x\n",
691 coex_dm->pre_bt_psd_mode, coex_dm->cur_bt_psd_mode); 680 coex_dm->pre_bt_psd_mode, coex_dm->cur_bt_psd_mode);
692 681
693 if (coex_dm->pre_bt_psd_mode == coex_dm->cur_bt_psd_mode) 682 if (coex_dm->pre_bt_psd_mode == coex_dm->cur_bt_psd_mode)
694 return; 683 return;
@@ -709,10 +698,10 @@ static void halbtc8821a2ant_set_bt_auto_report(struct btc_coexist *btcoexist,
709 if (enable_auto_report) 698 if (enable_auto_report)
710 h2c_parameter[0] |= BIT0; 699 h2c_parameter[0] |= BIT0;
711 700
712 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, 701 btc_alg_dbg(ALGO_TRACE_FW_EXEC,
713 "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n", 702 "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
714 (enable_auto_report ? "Enabled!!" : "Disabled!!"), 703 (enable_auto_report ? "Enabled!!" : "Disabled!!"),
715 h2c_parameter[0]); 704 h2c_parameter[0]);
716 705
717 btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter); 706 btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
718} 707}
@@ -721,17 +710,17 @@ static void halbtc8821a2ant_bt_auto_report(struct btc_coexist *btcoexist,
721 bool force_exec, 710 bool force_exec,
722 bool enable_auto_report) 711 bool enable_auto_report)
723{ 712{
724 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, 713 btc_alg_dbg(ALGO_TRACE_FW,
725 "[BTCoex], %s BT Auto report = %s\n", 714 "[BTCoex], %s BT Auto report = %s\n",
726 (force_exec ? "force to" : ""), 715 (force_exec ? "force to" : ""),
727 ((enable_auto_report) ? "Enabled" : "Disabled")); 716 ((enable_auto_report) ? "Enabled" : "Disabled"));
728 coex_dm->cur_bt_auto_report = enable_auto_report; 717 coex_dm->cur_bt_auto_report = enable_auto_report;
729 718
730 if (!force_exec) { 719 if (!force_exec) {
731 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 720 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
732 "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n", 721 "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n",
733 coex_dm->pre_bt_auto_report, 722 coex_dm->pre_bt_auto_report,
734 coex_dm->cur_bt_auto_report); 723 coex_dm->cur_bt_auto_report);
735 724
736 if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report) 725 if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report)
737 return; 726 return;
@@ -746,16 +735,16 @@ static void halbtc8821a2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
746 bool force_exec, 735 bool force_exec,
747 u8 fw_dac_swing_lvl) 736 u8 fw_dac_swing_lvl)
748{ 737{
749 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, 738 btc_alg_dbg(ALGO_TRACE_FW,
750 "[BTCoex], %s set FW Dac Swing level = %d\n", 739 "[BTCoex], %s set FW Dac Swing level = %d\n",
751 (force_exec ? "force to" : ""), fw_dac_swing_lvl); 740 (force_exec ? "force to" : ""), fw_dac_swing_lvl);
752 coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl; 741 coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl;
753 742
754 if (!force_exec) { 743 if (!force_exec) {
755 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 744 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
756 "[BTCoex], pre_fw_dac_swing_lvl = %d, cur_fw_dac_swing_lvl = %d\n", 745 "[BTCoex], pre_fw_dac_swing_lvl = %d, cur_fw_dac_swing_lvl = %d\n",
757 coex_dm->pre_fw_dac_swing_lvl, 746 coex_dm->pre_fw_dac_swing_lvl,
758 coex_dm->cur_fw_dac_swing_lvl); 747 coex_dm->cur_fw_dac_swing_lvl);
759 748
760 if (coex_dm->pre_fw_dac_swing_lvl == 749 if (coex_dm->pre_fw_dac_swing_lvl ==
761 coex_dm->cur_fw_dac_swing_lvl) 750 coex_dm->cur_fw_dac_swing_lvl)
@@ -773,8 +762,8 @@ static void btc8821a2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
773{ 762{
774 if (rx_rf_shrink_on) { 763 if (rx_rf_shrink_on) {
775 /* Shrink RF Rx LPF corner */ 764 /* Shrink RF Rx LPF corner */
776 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, 765 btc_alg_dbg(ALGO_TRACE_SW_EXEC,
777 "[BTCoex], Shrink RF Rx LPF corner!!\n"); 766 "[BTCoex], Shrink RF Rx LPF corner!!\n");
778 btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e, 767 btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
779 0xfffff, 0xffffc); 768 0xfffff, 0xffffc);
780 } else { 769 } else {
@@ -782,8 +771,8 @@ static void btc8821a2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
782 * After initialized, we can use coex_dm->bt_rf0x1e_backup 771 * After initialized, we can use coex_dm->bt_rf0x1e_backup
783 */ 772 */
784 if (btcoexist->initilized) { 773 if (btcoexist->initilized) {
785 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, 774 btc_alg_dbg(ALGO_TRACE_SW_EXEC,
786 "[BTCoex], Resume RF Rx LPF corner!!\n"); 775 "[BTCoex], Resume RF Rx LPF corner!!\n");
787 btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 776 btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A,
788 0x1e, 0xfffff, 777 0x1e, 0xfffff,
789 coex_dm->bt_rf0x1e_backup); 778 coex_dm->bt_rf0x1e_backup);
@@ -794,17 +783,17 @@ static void btc8821a2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
794static void halbtc8821a2ant_RfShrink(struct btc_coexist *btcoexist, 783static void halbtc8821a2ant_RfShrink(struct btc_coexist *btcoexist,
795 bool force_exec, bool rx_rf_shrink_on) 784 bool force_exec, bool rx_rf_shrink_on)
796{ 785{
797 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, 786 btc_alg_dbg(ALGO_TRACE_SW,
798 "[BTCoex], %s turn Rx RF Shrink = %s\n", 787 "[BTCoex], %s turn Rx RF Shrink = %s\n",
799 (force_exec ? "force to" : ""), 788 (force_exec ? "force to" : ""),
800 ((rx_rf_shrink_on) ? "ON" : "OFF")); 789 ((rx_rf_shrink_on) ? "ON" : "OFF"));
801 coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on; 790 coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
802 791
803 if (!force_exec) { 792 if (!force_exec) {
804 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, 793 btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
805 "[BTCoex], pre_rf_rx_lpf_shrink = %d, cur_rf_rx_lpf_shrink = %d\n", 794 "[BTCoex], pre_rf_rx_lpf_shrink = %d, cur_rf_rx_lpf_shrink = %d\n",
806 coex_dm->pre_rf_rx_lpf_shrink, 795 coex_dm->pre_rf_rx_lpf_shrink,
807 coex_dm->cur_rf_rx_lpf_shrink); 796 coex_dm->cur_rf_rx_lpf_shrink);
808 797
809 if (coex_dm->pre_rf_rx_lpf_shrink == 798 if (coex_dm->pre_rf_rx_lpf_shrink ==
810 coex_dm->cur_rf_rx_lpf_shrink) 799 coex_dm->cur_rf_rx_lpf_shrink)
@@ -835,9 +824,9 @@ static void btc8821a2ant_SetSwPenTxRateAdapt(struct btc_coexist *btcoexist,
835 h2c_parameter[5] = 0xf9; 824 h2c_parameter[5] = 0xf9;
836 } 825 }
837 826
838 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, 827 btc_alg_dbg(ALGO_TRACE_FW_EXEC,
839 "[BTCoex], set WiFi Low-Penalty Retry: %s", 828 "[BTCoex], set WiFi Low-Penalty Retry: %s",
840 (low_penalty_ra ? "ON!!" : "OFF!!")); 829 (low_penalty_ra ? "ON!!" : "OFF!!"));
841 830
842 btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter); 831 btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
843} 832}
@@ -846,17 +835,17 @@ static void halbtc8821a2ant_low_penalty_ra(struct btc_coexist *btcoexist,
846 bool force_exec, bool low_penalty_ra) 835 bool force_exec, bool low_penalty_ra)
847{ 836{
848 /*return;*/ 837 /*return;*/
849 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, 838 btc_alg_dbg(ALGO_TRACE_SW,
850 "[BTCoex], %s turn LowPenaltyRA = %s\n", 839 "[BTCoex], %s turn LowPenaltyRA = %s\n",
851 (force_exec ? "force to" : ""), 840 (force_exec ? "force to" : ""),
852 ((low_penalty_ra) ? "ON" : "OFF")); 841 ((low_penalty_ra) ? "ON" : "OFF"));
853 coex_dm->cur_low_penalty_ra = low_penalty_ra; 842 coex_dm->cur_low_penalty_ra = low_penalty_ra;
854 843
855 if (!force_exec) { 844 if (!force_exec) {
856 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, 845 btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
857 "[BTCoex], pre_low_penalty_ra = %d, cur_low_penalty_ra = %d\n", 846 "[BTCoex], pre_low_penalty_ra = %d, cur_low_penalty_ra = %d\n",
858 coex_dm->pre_low_penalty_ra, 847 coex_dm->pre_low_penalty_ra,
859 coex_dm->cur_low_penalty_ra); 848 coex_dm->cur_low_penalty_ra);
860 849
861 if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra) 850 if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra)
862 return; 851 return;
@@ -872,8 +861,8 @@ static void halbtc8821a2ant_set_dac_swing_reg(struct btc_coexist *btcoexist,
872{ 861{
873 u8 val = (u8)level; 862 u8 val = (u8)level;
874 863
875 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, 864 btc_alg_dbg(ALGO_TRACE_SW_EXEC,
876 "[BTCoex], Write SwDacSwing = 0x%x\n", level); 865 "[BTCoex], Write SwDacSwing = 0x%x\n", level);
877 btcoexist->btc_write_1byte_bitmask(btcoexist, 0xc5b, 0x3e, val); 866 btcoexist->btc_write_1byte_bitmask(btcoexist, 0xc5b, 0x3e, val);
878} 867}
879 868
@@ -891,21 +880,21 @@ static void halbtc8821a2ant_dac_swing(struct btc_coexist *btcoexist,
891 bool force_exec, bool dac_swing_on, 880 bool force_exec, bool dac_swing_on,
892 u32 dac_swing_lvl) 881 u32 dac_swing_lvl)
893{ 882{
894 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, 883 btc_alg_dbg(ALGO_TRACE_SW,
895 "[BTCoex], %s turn DacSwing = %s, dac_swing_lvl = 0x%x\n", 884 "[BTCoex], %s turn DacSwing = %s, dac_swing_lvl = 0x%x\n",
896 (force_exec ? "force to" : ""), 885 (force_exec ? "force to" : ""),
897 ((dac_swing_on) ? "ON" : "OFF"), 886 ((dac_swing_on) ? "ON" : "OFF"),
898 dac_swing_lvl); 887 dac_swing_lvl);
899 coex_dm->cur_dac_swing_on = dac_swing_on; 888 coex_dm->cur_dac_swing_on = dac_swing_on;
900 coex_dm->cur_dac_swing_lvl = dac_swing_lvl; 889 coex_dm->cur_dac_swing_lvl = dac_swing_lvl;
901 890
902 if (!force_exec) { 891 if (!force_exec) {
903 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, 892 btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
904 "[BTCoex], pre_dac_swing_on = %d, pre_dac_swing_lvl = 0x%x, cur_dac_swing_on = %d, cur_dac_swing_lvl = 0x%x\n", 893 "[BTCoex], pre_dac_swing_on = %d, pre_dac_swing_lvl = 0x%x, cur_dac_swing_on = %d, cur_dac_swing_lvl = 0x%x\n",
905 coex_dm->pre_dac_swing_on, 894 coex_dm->pre_dac_swing_on,
906 coex_dm->pre_dac_swing_lvl, 895 coex_dm->pre_dac_swing_lvl,
907 coex_dm->cur_dac_swing_on, 896 coex_dm->cur_dac_swing_on,
908 coex_dm->cur_dac_swing_lvl); 897 coex_dm->cur_dac_swing_lvl);
909 898
910 if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) && 899 if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
911 (coex_dm->pre_dac_swing_lvl == 900 (coex_dm->pre_dac_swing_lvl ==
@@ -924,12 +913,12 @@ static void halbtc8821a2ant_set_adc_back_off(struct btc_coexist *btcoexist,
924 bool adc_back_off) 913 bool adc_back_off)
925{ 914{
926 if (adc_back_off) { 915 if (adc_back_off) {
927 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, 916 btc_alg_dbg(ALGO_TRACE_SW_EXEC,
928 "[BTCoex], BB BackOff Level On!\n"); 917 "[BTCoex], BB BackOff Level On!\n");
929 btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x3); 918 btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x3);
930 } else { 919 } else {
931 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, 920 btc_alg_dbg(ALGO_TRACE_SW_EXEC,
932 "[BTCoex], BB BackOff Level Off!\n"); 921 "[BTCoex], BB BackOff Level Off!\n");
933 btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x1); 922 btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x1);
934 } 923 }
935} 924}
@@ -937,16 +926,17 @@ static void halbtc8821a2ant_set_adc_back_off(struct btc_coexist *btcoexist,
937static void halbtc8821a2ant_adc_back_off(struct btc_coexist *btcoexist, 926static void halbtc8821a2ant_adc_back_off(struct btc_coexist *btcoexist,
938 bool force_exec, bool adc_back_off) 927 bool force_exec, bool adc_back_off)
939{ 928{
940 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, 929 btc_alg_dbg(ALGO_TRACE_SW,
941 "[BTCoex], %s turn AdcBackOff = %s\n", 930 "[BTCoex], %s turn AdcBackOff = %s\n",
942 (force_exec ? "force to" : ""), 931 (force_exec ? "force to" : ""),
943 ((adc_back_off) ? "ON" : "OFF")); 932 ((adc_back_off) ? "ON" : "OFF"));
944 coex_dm->cur_adc_back_off = adc_back_off; 933 coex_dm->cur_adc_back_off = adc_back_off;
945 934
946 if (!force_exec) { 935 if (!force_exec) {
947 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, 936 btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
948 "[BTCoex], pre_adc_back_off = %d, cur_adc_back_off = %d\n", 937 "[BTCoex], pre_adc_back_off = %d, cur_adc_back_off = %d\n",
949 coex_dm->pre_adc_back_off, coex_dm->cur_adc_back_off); 938 coex_dm->pre_adc_back_off,
939 coex_dm->cur_adc_back_off);
950 940
951 if (coex_dm->pre_adc_back_off == coex_dm->cur_adc_back_off) 941 if (coex_dm->pre_adc_back_off == coex_dm->cur_adc_back_off)
952 return; 942 return;
@@ -960,20 +950,20 @@ static void halbtc8821a2ant_set_coex_table(struct btc_coexist *btcoexist,
960 u32 val0x6c0, u32 val0x6c4, 950 u32 val0x6c0, u32 val0x6c4,
961 u32 val0x6c8, u8 val0x6cc) 951 u32 val0x6c8, u8 val0x6cc)
962{ 952{
963 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, 953 btc_alg_dbg(ALGO_TRACE_SW_EXEC,
964 "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0); 954 "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
965 btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0); 955 btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
966 956
967 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, 957 btc_alg_dbg(ALGO_TRACE_SW_EXEC,
968 "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4); 958 "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
969 btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4); 959 btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
970 960
971 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, 961 btc_alg_dbg(ALGO_TRACE_SW_EXEC,
972 "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8); 962 "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
973 btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8); 963 btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
974 964
975 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, 965 btc_alg_dbg(ALGO_TRACE_SW_EXEC,
976 "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc); 966 "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
977 btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc); 967 btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
978} 968}
979 969
@@ -981,28 +971,28 @@ static void halbtc8821a2ant_coex_table(struct btc_coexist *btcoexist,
981 bool force_exec, u32 val0x6c0, 971 bool force_exec, u32 val0x6c0,
982 u32 val0x6c4, u32 val0x6c8, u8 val0x6cc) 972 u32 val0x6c4, u32 val0x6c8, u8 val0x6cc)
983{ 973{
984 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, 974 btc_alg_dbg(ALGO_TRACE_SW,
985 "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n", 975 "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
986 (force_exec ? "force to" : ""), 976 (force_exec ? "force to" : ""),
987 val0x6c0, val0x6c4, val0x6c8, val0x6cc); 977 val0x6c0, val0x6c4, val0x6c8, val0x6cc);
988 coex_dm->cur_val0x6c0 = val0x6c0; 978 coex_dm->cur_val0x6c0 = val0x6c0;
989 coex_dm->cur_val0x6c4 = val0x6c4; 979 coex_dm->cur_val0x6c4 = val0x6c4;
990 coex_dm->cur_val0x6c8 = val0x6c8; 980 coex_dm->cur_val0x6c8 = val0x6c8;
991 coex_dm->cur_val0x6cc = val0x6cc; 981 coex_dm->cur_val0x6cc = val0x6cc;
992 982
993 if (!force_exec) { 983 if (!force_exec) {
994 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, 984 btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
995 "[BTCoex], pre_val0x6c0 = 0x%x, pre_val0x6c4 = 0x%x, pre_val0x6c8 = 0x%x, pre_val0x6cc = 0x%x !!\n", 985 "[BTCoex], pre_val0x6c0 = 0x%x, pre_val0x6c4 = 0x%x, pre_val0x6c8 = 0x%x, pre_val0x6cc = 0x%x !!\n",
996 coex_dm->pre_val0x6c0, 986 coex_dm->pre_val0x6c0,
997 coex_dm->pre_val0x6c4, 987 coex_dm->pre_val0x6c4,
998 coex_dm->pre_val0x6c8, 988 coex_dm->pre_val0x6c8,
999 coex_dm->pre_val0x6cc); 989 coex_dm->pre_val0x6cc);
1000 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, 990 btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
1001 "[BTCoex], cur_val0x6c0 = 0x%x, cur_val0x6c4 = 0x%x, cur_val0x6c8 = 0x%x, cur_val0x6cc = 0x%x !!\n", 991 "[BTCoex], cur_val0x6c0 = 0x%x, cur_val0x6c4 = 0x%x, cur_val0x6c8 = 0x%x, cur_val0x6cc = 0x%x !!\n",
1002 coex_dm->cur_val0x6c0, 992 coex_dm->cur_val0x6c0,
1003 coex_dm->cur_val0x6c4, 993 coex_dm->cur_val0x6c4,
1004 coex_dm->cur_val0x6c8, 994 coex_dm->cur_val0x6c8,
1005 coex_dm->cur_val0x6cc); 995 coex_dm->cur_val0x6cc);
1006 996
1007 if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) && 997 if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
1008 (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) && 998 (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
@@ -1027,9 +1017,9 @@ static void halbtc8821a2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoex,
1027 if (enable) 1017 if (enable)
1028 h2c_parameter[0] |= BIT0;/* function enable */ 1018 h2c_parameter[0] |= BIT0;/* function enable */
1029 1019
1030 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, 1020 btc_alg_dbg(ALGO_TRACE_FW_EXEC,
1031 "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n", 1021 "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
1032 h2c_parameter[0]); 1022 h2c_parameter[0]);
1033 1023
1034 btcoex->btc_fill_h2c(btcoex, 0x63, 1, h2c_parameter); 1024 btcoex->btc_fill_h2c(btcoex, 0x63, 1, h2c_parameter);
1035} 1025}
@@ -1037,16 +1027,16 @@ static void halbtc8821a2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoex,
1037static void halbtc8821a2ant_ignore_wlan_act(struct btc_coexist *btcoexist, 1027static void halbtc8821a2ant_ignore_wlan_act(struct btc_coexist *btcoexist,
1038 bool force_exec, bool enable) 1028 bool force_exec, bool enable)
1039{ 1029{
1040 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, 1030 btc_alg_dbg(ALGO_TRACE_FW,
1041 "[BTCoex], %s turn Ignore WlanAct %s\n", 1031 "[BTCoex], %s turn Ignore WlanAct %s\n",
1042 (force_exec ? "force to" : ""), (enable ? "ON" : "OFF")); 1032 (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
1043 coex_dm->cur_ignore_wlan_act = enable; 1033 coex_dm->cur_ignore_wlan_act = enable;
1044 1034
1045 if (!force_exec) { 1035 if (!force_exec) {
1046 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 1036 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
1047 "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n", 1037 "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n",
1048 coex_dm->pre_ignore_wlan_act, 1038 coex_dm->pre_ignore_wlan_act,
1049 coex_dm->cur_ignore_wlan_act); 1039 coex_dm->cur_ignore_wlan_act);
1050 1040
1051 if (coex_dm->pre_ignore_wlan_act == 1041 if (coex_dm->pre_ignore_wlan_act ==
1052 coex_dm->cur_ignore_wlan_act) 1042 coex_dm->cur_ignore_wlan_act)
@@ -1075,13 +1065,13 @@ static void halbtc8821a2ant_set_fw_pstdma(struct btc_coexist *btcoexist,
1075 coex_dm->ps_tdma_para[3] = byte4; 1065 coex_dm->ps_tdma_para[3] = byte4;
1076 coex_dm->ps_tdma_para[4] = byte5; 1066 coex_dm->ps_tdma_para[4] = byte5;
1077 1067
1078 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, 1068 btc_alg_dbg(ALGO_TRACE_FW_EXEC,
1079 "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n", 1069 "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n",
1080 h2c_parameter[0], 1070 h2c_parameter[0],
1081 h2c_parameter[1]<<24| 1071 h2c_parameter[1] << 24 |
1082 h2c_parameter[2]<<16| 1072 h2c_parameter[2] << 16 |
1083 h2c_parameter[3]<<8| 1073 h2c_parameter[3] << 8 |
1084 h2c_parameter[4]); 1074 h2c_parameter[4]);
1085 1075
1086 btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter); 1076 btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
1087} 1077}
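
The FW-write trace above folds the last four parameter bytes into a single 32-bit value so all five bytes can be printed with one "0x%x%08x" format. A standalone rendering of that packing, with example bytes only (not a real PS-TDMA parameter set):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t h2c_parameter[5] = { 0x51, 0x0c, 0x03, 0x10, 0x50 };
	uint32_t tail = (uint32_t)h2c_parameter[1] << 24 |
			(uint32_t)h2c_parameter[2] << 16 |
			(uint32_t)h2c_parameter[3] << 8 |
			h2c_parameter[4];

	/* mirrors "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n" */
	printf("FW write 0x60(5bytes) = 0x%x%08x\n",
	       h2c_parameter[0], (unsigned int)tail);
	return 0;
}
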
@@ -1175,20 +1165,20 @@ static void halbtc8821a2ant_set_ant_path(struct btc_coexist *btcoexist,
1175static void halbtc8821a2ant_ps_tdma(struct btc_coexist *btcoexist, 1165static void halbtc8821a2ant_ps_tdma(struct btc_coexist *btcoexist,
1176 bool force_exec, bool turn_on, u8 type) 1166 bool force_exec, bool turn_on, u8 type)
1177{ 1167{
1178 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, 1168 btc_alg_dbg(ALGO_TRACE_FW,
1179 "[BTCoex], %s turn %s PS TDMA, type = %d\n", 1169 "[BTCoex], %s turn %s PS TDMA, type = %d\n",
1180 (force_exec ? "force to" : ""), (turn_on ? "ON" : "OFF"), 1170 (force_exec ? "force to" : ""), (turn_on ? "ON" : "OFF"),
1181 type); 1171 type);
1182 coex_dm->cur_ps_tdma_on = turn_on; 1172 coex_dm->cur_ps_tdma_on = turn_on;
1183 coex_dm->cur_ps_tdma = type; 1173 coex_dm->cur_ps_tdma = type;
1184 1174
1185 if (!force_exec) { 1175 if (!force_exec) {
1186 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 1176 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
1187 "[BTCoex], pre_ps_tdma_on = %d, cur_ps_tdma_on = %d!!\n", 1177 "[BTCoex], pre_ps_tdma_on = %d, cur_ps_tdma_on = %d!!\n",
1188 coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on); 1178 coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
1189 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 1179 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
1190 "[BTCoex], pre_ps_tdma = %d, cur_ps_tdma = %d!!\n", 1180 "[BTCoex], pre_ps_tdma = %d, cur_ps_tdma = %d!!\n",
1191 coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma); 1181 coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
1192 1182
1193 if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) && 1183 if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
1194 (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma)) 1184 (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1374,8 +1364,8 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
1374 btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, 1364 btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
1375 &low_pwr_disable); 1365 &low_pwr_disable);
1376 1366
1377 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1367 btc_alg_dbg(ALGO_TRACE,
1378 "[BTCoex], Wifi IPS + BT IPS!!\n"); 1368 "[BTCoex], Wifi IPS + BT IPS!!\n");
1379 1369
1380 halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); 1370 halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
1381 halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); 1371 halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
@@ -1392,13 +1382,13 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
1392 &low_pwr_disable); 1382 &low_pwr_disable);
1393 1383
1394 if (wifi_busy) { 1384 if (wifi_busy) {
1395 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1385 btc_alg_dbg(ALGO_TRACE,
1396 "[BTCoex], Wifi Busy + BT IPS!!\n"); 1386 "[BTCoex], Wifi Busy + BT IPS!!\n");
1397 halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, 1387 halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
1398 false, 1); 1388 false, 1);
1399 } else { 1389 } else {
1400 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1390 btc_alg_dbg(ALGO_TRACE,
1401 "[BTCoex], Wifi LPS + BT IPS!!\n"); 1391 "[BTCoex], Wifi LPS + BT IPS!!\n");
1402 halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, 1392 halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
1403 false, 1); 1393 false, 1);
1404 } 1394 }
@@ -1416,8 +1406,8 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
1416 btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, 1406 btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
1417 &low_pwr_disable); 1407 &low_pwr_disable);
1418 1408
1419 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1409 btc_alg_dbg(ALGO_TRACE,
1420 "[BTCoex], Wifi IPS + BT LPS!!\n"); 1410 "[BTCoex], Wifi IPS + BT LPS!!\n");
1421 1411
1422 halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); 1412 halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
1423 halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); 1413 halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
@@ -1433,13 +1423,13 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
1433 BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable); 1423 BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable);
1434 1424
1435 if (wifi_busy) { 1425 if (wifi_busy) {
1436 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1426 btc_alg_dbg(ALGO_TRACE,
1437 "[BTCoex], Wifi Busy + BT LPS!!\n"); 1427 "[BTCoex], Wifi Busy + BT LPS!!\n");
1438 halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, 1428 halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
1439 false, 1); 1429 false, 1);
1440 } else { 1430 } else {
1441 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1431 btc_alg_dbg(ALGO_TRACE,
1442 "[BTCoex], Wifi LPS + BT LPS!!\n"); 1432 "[BTCoex], Wifi LPS + BT LPS!!\n");
1443 halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, 1433 halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
1444 false, 1); 1434 false, 1);
1445 } 1435 }
@@ -1458,8 +1448,8 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
1458 btcoexist->btc_set(btcoexist, 1448 btcoexist->btc_set(btcoexist,
1459 BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable); 1449 BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable);
1460 1450
1461 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1451 btc_alg_dbg(ALGO_TRACE,
1462 "[BTCoex], Wifi IPS + BT Busy!!\n"); 1452 "[BTCoex], Wifi IPS + BT Busy!!\n");
1463 1453
1464 halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); 1454 halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
1465 halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); 1455 halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
@@ -1478,12 +1468,12 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
1478 &low_pwr_disable); 1468 &low_pwr_disable);
1479 1469
1480 if (wifi_busy) { 1470 if (wifi_busy) {
1481 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1471 btc_alg_dbg(ALGO_TRACE,
1482 "[BTCoex], Wifi Busy + BT Busy!!\n"); 1472 "[BTCoex], Wifi Busy + BT Busy!!\n");
1483 common = false; 1473 common = false;
1484 } else { 1474 } else {
1485 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 1475 btc_alg_dbg(ALGO_TRACE,
1486 "[BTCoex], Wifi LPS + BT Busy!!\n"); 1476 "[BTCoex], Wifi LPS + BT Busy!!\n");
1487 halbtc8821a2ant_ps_tdma(btcoexist, 1477 halbtc8821a2ant_ps_tdma(btcoexist,
1488 NORMAL_EXEC, true, 21); 1478 NORMAL_EXEC, true, 21);
1489 1479
@@ -1505,8 +1495,8 @@ static void btc8821a2_int1(struct btc_coexist *btcoexist, bool tx_pause,
1505 int result) 1495 int result)
1506{ 1496{
1507 if (tx_pause) { 1497 if (tx_pause) {
1508 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 1498 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
1509 "[BTCoex], TxPause = 1\n"); 1499 "[BTCoex], TxPause = 1\n");
1510 1500
1511 if (coex_dm->cur_ps_tdma == 71) { 1501 if (coex_dm->cur_ps_tdma == 71) {
1512 halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, 1502 halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
@@ -1601,8 +1591,8 @@ static void btc8821a2_int1(struct btc_coexist *btcoexist, bool tx_pause,
1601 } 1591 }
1602 } 1592 }
1603 } else { 1593 } else {
1604 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 1594 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
1605 "[BTCoex], TxPause = 0\n"); 1595 "[BTCoex], TxPause = 0\n");
1606 if (coex_dm->cur_ps_tdma == 5) { 1596 if (coex_dm->cur_ps_tdma == 5) {
1607 halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, 1597 halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
1608 true, 71); 1598 true, 71);
@@ -1706,8 +1696,8 @@ static void btc8821a2_int2(struct btc_coexist *btcoexist, bool tx_pause,
1706 int result) 1696 int result)
1707{ 1697{
1708 if (tx_pause) { 1698 if (tx_pause) {
1709 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 1699 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
1710 "[BTCoex], TxPause = 1\n"); 1700 "[BTCoex], TxPause = 1\n");
1711 if (coex_dm->cur_ps_tdma == 1) { 1701 if (coex_dm->cur_ps_tdma == 1) {
1712 halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, 1702 halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
1713 true, 6); 1703 true, 6);
@@ -1796,8 +1786,8 @@ static void btc8821a2_int2(struct btc_coexist *btcoexist, bool tx_pause,
1796 } 1786 }
1797 } 1787 }
1798 } else { 1788 } else {
1799 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 1789 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
1800 "[BTCoex], TxPause = 0\n"); 1790 "[BTCoex], TxPause = 0\n");
1801 if (coex_dm->cur_ps_tdma == 5) { 1791 if (coex_dm->cur_ps_tdma == 5) {
1802 halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, 1792 halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
1803 true, 2); 1793 true, 2);
@@ -1892,8 +1882,8 @@ static void btc8821a2_int3(struct btc_coexist *btcoexist, bool tx_pause,
1892 int result) 1882 int result)
1893{ 1883{
1894 if (tx_pause) { 1884 if (tx_pause) {
1895 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 1885 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
1896 "[BTCoex], TxPause = 1\n"); 1886 "[BTCoex], TxPause = 1\n");
1897 if (coex_dm->cur_ps_tdma == 1) { 1887 if (coex_dm->cur_ps_tdma == 1) {
1898 halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, 1888 halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
1899 true, 7); 1889 true, 7);
@@ -1982,8 +1972,8 @@ static void btc8821a2_int3(struct btc_coexist *btcoexist, bool tx_pause,
1982 } 1972 }
1983 } 1973 }
1984 } else { 1974 } else {
1985 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 1975 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
1986 "[BTCoex], TxPause = 0\n"); 1976 "[BTCoex], TxPause = 0\n");
1987 if (coex_dm->cur_ps_tdma == 5) { 1977 if (coex_dm->cur_ps_tdma == 5) {
1988 halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, 1978 halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
1989 true, 3); 1979 true, 3);
@@ -2085,13 +2075,13 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
2085 int result; 2075 int result;
2086 u8 retry_count = 0; 2076 u8 retry_count = 0;
2087 2077
2088 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, 2078 btc_alg_dbg(ALGO_TRACE_FW,
2089 "[BTCoex], TdmaDurationAdjust()\n"); 2079 "[BTCoex], TdmaDurationAdjust()\n");
2090 2080
2091 if (coex_dm->reset_tdma_adjust) { 2081 if (coex_dm->reset_tdma_adjust) {
2092 coex_dm->reset_tdma_adjust = false; 2082 coex_dm->reset_tdma_adjust = false;
2093 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 2083 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
2094 "[BTCoex], first run TdmaDurationAdjust()!!\n"); 2084 "[BTCoex], first run TdmaDurationAdjust()!!\n");
2095 if (sco_hid) { 2085 if (sco_hid) {
2096 if (tx_pause) { 2086 if (tx_pause) {
2097 if (max_interval == 1) { 2087 if (max_interval == 1) {
@@ -2195,11 +2185,11 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
2195 } else { 2185 } else {
2196 /* accquire the BT TRx retry count from BT_Info byte2 */ 2186 /* accquire the BT TRx retry count from BT_Info byte2 */
2197 retry_count = coex_sta->bt_retry_cnt; 2187 retry_count = coex_sta->bt_retry_cnt;
2198 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 2188 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
2199 "[BTCoex], retry_count = %d\n", retry_count); 2189 "[BTCoex], retry_count = %d\n", retry_count);
2200 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 2190 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
2201 "[BTCoex], up = %d, dn = %d, m = %d, n = %d, wait_count = %d\n", 2191 "[BTCoex], up = %d, dn = %d, m = %d, n = %d, wait_count = %d\n",
2202 (int)up, (int)dn, (int)m, (int)n, (int)wait_count); 2192 (int)up, (int)dn, (int)m, (int)n, (int)wait_count);
2203 result = 0; 2193 result = 0;
2204 wait_count++; 2194 wait_count++;
2205 2195
@@ -2220,9 +2210,8 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
2220 up = 0; 2210 up = 0;
2221 dn = 0; 2211 dn = 0;
2222 result = 1; 2212 result = 1;
2223 BTC_PRINT(BTC_MSG_ALGORITHM, 2213 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
2224 ALGO_TRACE_FW_DETAIL, 2214 "[BTCoex], Increase wifi duration!!\n");
2225 "[BTCoex], Increase wifi duration!!\n");
2226 } 2215 }
2227 } else if (retry_count <= 3) { 2216 } else if (retry_count <= 3) {
2228 /* <=3 retry in the last 2-second duration */ 2217 /* <=3 retry in the last 2-second duration */
@@ -2251,9 +2240,8 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
2251 dn = 0; 2240 dn = 0;
2252 wait_count = 0; 2241 wait_count = 0;
2253 result = -1; 2242 result = -1;
2254 BTC_PRINT(BTC_MSG_ALGORITHM, 2243 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
2255 ALGO_TRACE_FW_DETAIL, 2244 "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
2256 "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
2257 } 2245 }
2258 } else { 2246 } else {
2259 /* retry count > 3, if retry count > 3 happens once, 2247 /* retry count > 3, if retry count > 3 happens once,
@@ -2274,12 +2262,12 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
2274 dn = 0; 2262 dn = 0;
2275 wait_count = 0; 2263 wait_count = 0;
2276 result = -1; 2264 result = -1;
2277 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 2265 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
2278 "[BTCoex], Decrease wifi duration for retryCounter>3!!\n"); 2266 "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
2279 } 2267 }
2280 2268
2281 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 2269 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
2282 "[BTCoex], max Interval = %d\n", max_interval); 2270 "[BTCoex], max Interval = %d\n", max_interval);
2283 if (max_interval == 1) 2271 if (max_interval == 1)
2284 btc8821a2_int1(btcoexist, tx_pause, result); 2272 btc8821a2_int1(btcoexist, tx_pause, result);
2285 else if (max_interval == 2) 2273 else if (max_interval == 2)
@@ -2295,9 +2283,9 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
2295 if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) { 2283 if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) {
2296 bool scan = false, link = false, roam = false; 2284 bool scan = false, link = false, roam = false;
2297 2285
2298 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 2286 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
2299 "[BTCoex], PsTdma type dismatch!!!, cur_ps_tdma = %d, recordPsTdma = %d\n", 2287 "[BTCoex], PsTdma type dismatch!!!, cur_ps_tdma = %d, recordPsTdma = %d\n",
2300 coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type); 2288 coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
2301 2289
2302 btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); 2290 btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
2303 btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); 2291 btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2307,8 +2295,8 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
2307 halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2295 halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
2308 coex_dm->tdma_adj_type); 2296 coex_dm->tdma_adj_type);
2309 } else { 2297 } else {
2310 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, 2298 btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
2311 "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n"); 2299 "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
2312 } 2300 }
2313 } 2301 }
2314 2302
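
Aside from the message-macro conversion, the hunks above all sit inside btc8821a2ant_tdma_dur_adj(), which steers the PS-TDMA duty cycle from the BT retry count carried in BT_Info byte2: quiet windows push toward a longer WiFi slot, retries push toward a shorter one, and the result is then applied through btc8821a2_int1/2/3 unless a scan, link or roam is in flight, in which case the driver logs that it "will adjust next time". A toy, user-space model of that feedback step follows; the hysteresis counters and thresholds are simplified assumptions for illustration, not a copy of the driver code.

#include <stdio.h>

/*
 * Toy model of the decision the traced messages above belong to:
 * TdmaDurationAdjust() reads the BT retry count from BT_Info byte2 and
 * produces +1 (give WiFi a longer TDMA slot), -1 (shorten it) or 0
 * (keep waiting).  The up/dn hysteresis and the thresholds below are
 * simplified assumptions for illustration, not a copy of the driver.
 */
static int tdma_adjust_step(int retry_count, int *up, int *dn, int n)
{
        if (retry_count == 0) {         /* no BT retries: lean toward WiFi */
                (*up)++;
                *dn = 0;
                if (*up >= n) {         /* enough quiet windows in a row */
                        *up = 0;
                        return 1;       /* "Increase wifi duration!!" */
                }
        } else {                        /* BT is retrying: back off */
                *up = 0;
                (*dn)++;
                if (retry_count > 3 || *dn >= 2)
                        return -1;      /* "Decrease wifi duration ..." */
        }
        return 0;
}

int main(void)
{
        int up = 0, dn = 0, i;
        int samples[] = { 0, 0, 0, 5, 0 };      /* fake BT retry counts */

        for (i = 0; i < 5; i++)
                printf("retry=%d -> result=%d\n", samples[i],
                       tdma_adjust_step(samples[i], &up, &dn, 3));
        return 0;
}
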
@@ -3183,8 +3171,8 @@ static void halbtc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
3183 u8 algorithm = 0; 3171 u8 algorithm = 0;
3184 3172
3185 if (btcoexist->manual_control) { 3173 if (btcoexist->manual_control) {
3186 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3174 btc_alg_dbg(ALGO_TRACE,
3187 "[BTCoex], Manual control!!!\n"); 3175 "[BTCoex], Manual control!!!\n");
3188 return; 3176 return;
3189 } 3177 }
3190 3178
@@ -3192,8 +3180,8 @@ static void halbtc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
3192 BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g); 3180 BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
3193 3181
3194 if (wifi_under_5g) { 3182 if (wifi_under_5g) {
3195 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3183 btc_alg_dbg(ALGO_TRACE,
3196 "[BTCoex], RunCoexistMechanism(), run 5G coex setting!!<===\n"); 3184 "[BTCoex], RunCoexistMechanism(), run 5G coex setting!!<===\n");
3197 halbtc8821a2ant_coex_under_5g(btcoexist); 3185 halbtc8821a2ant_coex_under_5g(btcoexist);
3198 return; 3186 return;
3199 } 3187 }
@@ -3201,81 +3189,82 @@ static void halbtc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
3201 algorithm = halbtc8821a2ant_action_algorithm(btcoexist); 3189 algorithm = halbtc8821a2ant_action_algorithm(btcoexist);
3202 if (coex_sta->c2h_bt_inquiry_page && 3190 if (coex_sta->c2h_bt_inquiry_page &&
3203 (BT_8821A_2ANT_COEX_ALGO_PANHS != algorithm)) { 3191 (BT_8821A_2ANT_COEX_ALGO_PANHS != algorithm)) {
3204 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3192 btc_alg_dbg(ALGO_TRACE,
3205 "[BTCoex], BT is under inquiry/page scan !!\n"); 3193 "[BTCoex], BT is under inquiry/page scan !!\n");
3206 halbtc8821a2ant_bt_inquiry_page(btcoexist); 3194 halbtc8821a2ant_bt_inquiry_page(btcoexist);
3207 return; 3195 return;
3208 } 3196 }
3209 3197
3210 coex_dm->cur_algorithm = algorithm; 3198 coex_dm->cur_algorithm = algorithm;
3211 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3199 btc_alg_dbg(ALGO_TRACE,
3212 "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm); 3200 "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm);
3213 3201
3214 if (halbtc8821a2ant_is_common_action(btcoexist)) { 3202 if (halbtc8821a2ant_is_common_action(btcoexist)) {
3215 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3203 btc_alg_dbg(ALGO_TRACE,
3216 "[BTCoex], Action 2-Ant common.\n"); 3204 "[BTCoex], Action 2-Ant common\n");
3217 coex_dm->reset_tdma_adjust = true; 3205 coex_dm->reset_tdma_adjust = true;
3218 } else { 3206 } else {
3219 if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) { 3207 if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) {
3220 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3208 btc_alg_dbg(ALGO_TRACE,
3221 "[BTCoex], pre_algorithm = %d, cur_algorithm = %d\n", 3209 "[BTCoex], pre_algorithm = %d, cur_algorithm = %d\n",
3222 coex_dm->pre_algorithm, coex_dm->cur_algorithm); 3210 coex_dm->pre_algorithm,
3211 coex_dm->cur_algorithm);
3223 coex_dm->reset_tdma_adjust = true; 3212 coex_dm->reset_tdma_adjust = true;
3224 } 3213 }
3225 switch (coex_dm->cur_algorithm) { 3214 switch (coex_dm->cur_algorithm) {
3226 case BT_8821A_2ANT_COEX_ALGO_SCO: 3215 case BT_8821A_2ANT_COEX_ALGO_SCO:
3227 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3216 btc_alg_dbg(ALGO_TRACE,
3228 "[BTCoex], Action 2-Ant, algorithm = SCO.\n"); 3217 "[BTCoex], Action 2-Ant, algorithm = SCO\n");
3229 halbtc8821a2ant_action_sco(btcoexist); 3218 halbtc8821a2ant_action_sco(btcoexist);
3230 break; 3219 break;
3231 case BT_8821A_2ANT_COEX_ALGO_HID: 3220 case BT_8821A_2ANT_COEX_ALGO_HID:
3232 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3221 btc_alg_dbg(ALGO_TRACE,
3233 "[BTCoex], Action 2-Ant, algorithm = HID.\n"); 3222 "[BTCoex], Action 2-Ant, algorithm = HID\n");
3234 halbtc8821a2ant_action_hid(btcoexist); 3223 halbtc8821a2ant_action_hid(btcoexist);
3235 break; 3224 break;
3236 case BT_8821A_2ANT_COEX_ALGO_A2DP: 3225 case BT_8821A_2ANT_COEX_ALGO_A2DP:
3237 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3226 btc_alg_dbg(ALGO_TRACE,
3238 "[BTCoex], Action 2-Ant, algorithm = A2DP.\n"); 3227 "[BTCoex], Action 2-Ant, algorithm = A2DP\n");
3239 halbtc8821a2ant_action_a2dp(btcoexist); 3228 halbtc8821a2ant_action_a2dp(btcoexist);
3240 break; 3229 break;
3241 case BT_8821A_2ANT_COEX_ALGO_A2DP_PANHS: 3230 case BT_8821A_2ANT_COEX_ALGO_A2DP_PANHS:
3242 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3231 btc_alg_dbg(ALGO_TRACE,
3243 "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS).\n"); 3232 "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
3244 halbtc8821a2ant_action_a2dp_pan_hs(btcoexist); 3233 halbtc8821a2ant_action_a2dp_pan_hs(btcoexist);
3245 break; 3234 break;
3246 case BT_8821A_2ANT_COEX_ALGO_PANEDR: 3235 case BT_8821A_2ANT_COEX_ALGO_PANEDR:
3247 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3236 btc_alg_dbg(ALGO_TRACE,
3248 "[BTCoex], Action 2-Ant, algorithm = PAN(EDR).\n"); 3237 "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)\n");
3249 halbtc8821a2ant_action_pan_edr(btcoexist); 3238 halbtc8821a2ant_action_pan_edr(btcoexist);
3250 break; 3239 break;
3251 case BT_8821A_2ANT_COEX_ALGO_PANHS: 3240 case BT_8821A_2ANT_COEX_ALGO_PANHS:
3252 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3241 btc_alg_dbg(ALGO_TRACE,
3253 "[BTCoex], Action 2-Ant, algorithm = HS mode.\n"); 3242 "[BTCoex], Action 2-Ant, algorithm = HS mode\n");
3254 halbtc8821a2ant_action_pan_hs(btcoexist); 3243 halbtc8821a2ant_action_pan_hs(btcoexist);
3255 break; 3244 break;
3256 case BT_8821A_2ANT_COEX_ALGO_PANEDR_A2DP: 3245 case BT_8821A_2ANT_COEX_ALGO_PANEDR_A2DP:
3257 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3246 btc_alg_dbg(ALGO_TRACE,
3258 "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP.\n"); 3247 "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP\n");
3259 halbtc8821a2ant_action_pan_edr_a2dp(btcoexist); 3248 halbtc8821a2ant_action_pan_edr_a2dp(btcoexist);
3260 break; 3249 break;
3261 case BT_8821A_2ANT_COEX_ALGO_PANEDR_HID: 3250 case BT_8821A_2ANT_COEX_ALGO_PANEDR_HID:
3262 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3251 btc_alg_dbg(ALGO_TRACE,
3263 "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID.\n"); 3252 "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID\n");
3264 halbtc8821a2ant_action_pan_edr_hid(btcoexist); 3253 halbtc8821a2ant_action_pan_edr_hid(btcoexist);
3265 break; 3254 break;
3266 case BT_8821A_2ANT_COEX_ALGO_HID_A2DP_PANEDR: 3255 case BT_8821A_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
3267 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3256 btc_alg_dbg(ALGO_TRACE,
3268 "[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN.\n"); 3257 "[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN\n");
3269 btc8821a2ant_act_hid_a2dp_pan_edr(btcoexist); 3258 btc8821a2ant_act_hid_a2dp_pan_edr(btcoexist);
3270 break; 3259 break;
3271 case BT_8821A_2ANT_COEX_ALGO_HID_A2DP: 3260 case BT_8821A_2ANT_COEX_ALGO_HID_A2DP:
3272 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3261 btc_alg_dbg(ALGO_TRACE,
3273 "[BTCoex], Action 2-Ant, algorithm = HID+A2DP.\n"); 3262 "[BTCoex], Action 2-Ant, algorithm = HID+A2DP\n");
3274 halbtc8821a2ant_action_hid_a2dp(btcoexist); 3263 halbtc8821a2ant_action_hid_a2dp(btcoexist);
3275 break; 3264 break;
3276 default: 3265 default:
3277 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3266 btc_alg_dbg(ALGO_TRACE,
3278 "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n"); 3267 "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n");
3279 halbtc8821a2ant_coex_all_off(btcoexist); 3268 halbtc8821a2ant_coex_all_off(btcoexist);
3280 break; 3269 break;
3281 } 3270 }
@@ -3294,8 +3283,8 @@ void ex_halbtc8821a2ant_init_hwconfig(struct btc_coexist *btcoexist)
3294{ 3283{
3295 u8 u1tmp = 0; 3284 u8 u1tmp = 0;
3296 3285
3297 BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, 3286 btc_iface_dbg(INTF_INIT,
3298 "[BTCoex], 2Ant Init HW Config!!\n"); 3287 "[BTCoex], 2Ant Init HW Config!!\n");
3299 3288
3300 /* backup rf 0x1e value */ 3289 /* backup rf 0x1e value */
3301 coex_dm->bt_rf0x1e_backup = 3290 coex_dm->bt_rf0x1e_backup =
@@ -3328,8 +3317,8 @@ ex_halbtc8821a2ant_init_coex_dm(
3328 struct btc_coexist *btcoexist 3317 struct btc_coexist *btcoexist
3329 ) 3318 )
3330{ 3319{
3331 BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, 3320 btc_iface_dbg(INTF_INIT,
3332 "[BTCoex], Coex Mechanism Init!!\n"); 3321 "[BTCoex], Coex Mechanism Init!!\n");
3333 3322
3334 halbtc8821a2ant_init_coex_dm(btcoexist); 3323 halbtc8821a2ant_init_coex_dm(btcoexist);
3335} 3324}
@@ -3574,13 +3563,13 @@ ex_halbtc8821a2ant_display_coex_info(
3574void ex_halbtc8821a2ant_ips_notify(struct btc_coexist *btcoexist, u8 type) 3563void ex_halbtc8821a2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
3575{ 3564{
3576 if (BTC_IPS_ENTER == type) { 3565 if (BTC_IPS_ENTER == type) {
3577 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3566 btc_iface_dbg(INTF_NOTIFY,
3578 "[BTCoex], IPS ENTER notify\n"); 3567 "[BTCoex], IPS ENTER notify\n");
3579 coex_sta->under_ips = true; 3568 coex_sta->under_ips = true;
3580 halbtc8821a2ant_coex_all_off(btcoexist); 3569 halbtc8821a2ant_coex_all_off(btcoexist);
3581 } else if (BTC_IPS_LEAVE == type) { 3570 } else if (BTC_IPS_LEAVE == type) {
3582 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3571 btc_iface_dbg(INTF_NOTIFY,
3583 "[BTCoex], IPS LEAVE notify\n"); 3572 "[BTCoex], IPS LEAVE notify\n");
3584 coex_sta->under_ips = false; 3573 coex_sta->under_ips = false;
3585 /*halbtc8821a2ant_init_coex_dm(btcoexist);*/ 3574 /*halbtc8821a2ant_init_coex_dm(btcoexist);*/
3586 } 3575 }
@@ -3589,12 +3578,12 @@ void ex_halbtc8821a2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
3589void ex_halbtc8821a2ant_lps_notify(struct btc_coexist *btcoexist, u8 type) 3578void ex_halbtc8821a2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
3590{ 3579{
3591 if (BTC_LPS_ENABLE == type) { 3580 if (BTC_LPS_ENABLE == type) {
3592 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3581 btc_iface_dbg(INTF_NOTIFY,
3593 "[BTCoex], LPS ENABLE notify\n"); 3582 "[BTCoex], LPS ENABLE notify\n");
3594 coex_sta->under_lps = true; 3583 coex_sta->under_lps = true;
3595 } else if (BTC_LPS_DISABLE == type) { 3584 } else if (BTC_LPS_DISABLE == type) {
3596 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3585 btc_iface_dbg(INTF_NOTIFY,
3597 "[BTCoex], LPS DISABLE notify\n"); 3586 "[BTCoex], LPS DISABLE notify\n");
3598 coex_sta->under_lps = false; 3587 coex_sta->under_lps = false;
3599 } 3588 }
3600} 3589}
@@ -3602,22 +3591,22 @@ void ex_halbtc8821a2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
3602void ex_halbtc8821a2ant_scan_notify(struct btc_coexist *btcoexist, u8 type) 3591void ex_halbtc8821a2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
3603{ 3592{
3604 if (BTC_SCAN_START == type) { 3593 if (BTC_SCAN_START == type) {
3605 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3594 btc_iface_dbg(INTF_NOTIFY,
3606 "[BTCoex], SCAN START notify\n"); 3595 "[BTCoex], SCAN START notify\n");
3607 } else if (BTC_SCAN_FINISH == type) { 3596 } else if (BTC_SCAN_FINISH == type) {
3608 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3597 btc_iface_dbg(INTF_NOTIFY,
3609 "[BTCoex], SCAN FINISH notify\n"); 3598 "[BTCoex], SCAN FINISH notify\n");
3610 } 3599 }
3611} 3600}
3612 3601
3613void ex_halbtc8821a2ant_connect_notify(struct btc_coexist *btcoexist, u8 type) 3602void ex_halbtc8821a2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
3614{ 3603{
3615 if (BTC_ASSOCIATE_START == type) { 3604 if (BTC_ASSOCIATE_START == type) {
3616 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3605 btc_iface_dbg(INTF_NOTIFY,
3617 "[BTCoex], CONNECT START notify\n"); 3606 "[BTCoex], CONNECT START notify\n");
3618 } else if (BTC_ASSOCIATE_FINISH == type) { 3607 } else if (BTC_ASSOCIATE_FINISH == type) {
3619 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3608 btc_iface_dbg(INTF_NOTIFY,
3620 "[BTCoex], CONNECT FINISH notify\n"); 3609 "[BTCoex], CONNECT FINISH notify\n");
3621 } 3610 }
3622} 3611}
3623 3612
@@ -3629,11 +3618,11 @@ void ex_halbtc8821a2ant_media_status_notify(struct btc_coexist *btcoexist,
3629 u8 wifi_central_chnl; 3618 u8 wifi_central_chnl;
3630 3619
3631 if (BTC_MEDIA_CONNECT == type) { 3620 if (BTC_MEDIA_CONNECT == type) {
3632 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3621 btc_iface_dbg(INTF_NOTIFY,
3633 "[BTCoex], MEDIA connect notify\n"); 3622 "[BTCoex], MEDIA connect notify\n");
3634 } else { 3623 } else {
3635 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3624 btc_iface_dbg(INTF_NOTIFY,
3636 "[BTCoex], MEDIA disconnect notify\n"); 3625 "[BTCoex], MEDIA disconnect notify\n");
3637 } 3626 }
3638 3627
3639 /* only 2.4G we need to inform bt the chnl mask*/ 3628 /* only 2.4G we need to inform bt the chnl mask*/
@@ -3654,9 +3643,11 @@ void ex_halbtc8821a2ant_media_status_notify(struct btc_coexist *btcoexist,
3654 coex_dm->wifi_chnl_info[1] = h2c_parameter[1]; 3643 coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
3655 coex_dm->wifi_chnl_info[2] = h2c_parameter[2]; 3644 coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
3656 3645
3657 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, 3646 btc_alg_dbg(ALGO_TRACE_FW_EXEC,
3658 "[BTCoex], FW write 0x66 = 0x%x\n", 3647 "[BTCoex], FW write 0x66 = 0x%x\n",
3659 h2c_parameter[0]<<16|h2c_parameter[1]<<8|h2c_parameter[2]); 3648 h2c_parameter[0] << 16 |
3649 h2c_parameter[1] << 8 |
3650 h2c_parameter[2]);
3660 3651
3661 btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter); 3652 btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
3662} 3653}
@@ -3664,8 +3655,8 @@ void ex_halbtc8821a2ant_media_status_notify(struct btc_coexist *btcoexist,
3664void ex_halbtc8821a2ant_special_packet_notify(struct btc_coexist *btcoexist, 3655void ex_halbtc8821a2ant_special_packet_notify(struct btc_coexist *btcoexist,
3665 u8 type) { 3656 u8 type) {
3666 if (type == BTC_PACKET_DHCP) { 3657 if (type == BTC_PACKET_DHCP) {
3667 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3658 btc_iface_dbg(INTF_NOTIFY,
3668 "[BTCoex], DHCP Packet notify\n"); 3659 "[BTCoex], DHCP Packet notify\n");
3669 } 3660 }
3670} 3661}
3671 3662
@@ -3685,19 +3676,19 @@ void ex_halbtc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
3685 rsp_source = BT_INFO_SRC_8821A_2ANT_WIFI_FW; 3676 rsp_source = BT_INFO_SRC_8821A_2ANT_WIFI_FW;
3686 coex_sta->bt_info_c2h_cnt[rsp_source]++; 3677 coex_sta->bt_info_c2h_cnt[rsp_source]++;
3687 3678
3688 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3679 btc_iface_dbg(INTF_NOTIFY,
3689 "[BTCoex], Bt info[%d], length = %d, hex data = [", 3680 "[BTCoex], Bt info[%d], length = %d, hex data = [",
3690 rsp_source, length); 3681 rsp_source, length);
3691 for (i = 0; i < length; i++) { 3682 for (i = 0; i < length; i++) {
3692 coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i]; 3683 coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
3693 if (i == 1) 3684 if (i == 1)
3694 bt_info = tmp_buf[i]; 3685 bt_info = tmp_buf[i];
3695 if (i == length-1) { 3686 if (i == length-1) {
3696 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3687 btc_iface_dbg(INTF_NOTIFY,
3697 "0x%02x]\n", tmp_buf[i]); 3688 "0x%02x]\n", tmp_buf[i]);
3698 } else { 3689 } else {
3699 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3690 btc_iface_dbg(INTF_NOTIFY,
3700 "0x%02x, ", tmp_buf[i]); 3691 "0x%02x, ", tmp_buf[i]);
3701 } 3692 }
3702 } 3693 }
3703 3694
@@ -3823,8 +3814,8 @@ void ex_halbtc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
3823 3814
3824void ex_halbtc8821a2ant_halt_notify(struct btc_coexist *btcoexist) 3815void ex_halbtc8821a2ant_halt_notify(struct btc_coexist *btcoexist)
3825{ 3816{
3826 BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, 3817 btc_iface_dbg(INTF_NOTIFY,
3827 "[BTCoex], Halt notify\n"); 3818 "[BTCoex], Halt notify\n");
3828 3819
3829 halbtc8821a2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true); 3820 halbtc8821a2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
3830 ex_halbtc8821a2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT); 3821 ex_halbtc8821a2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
@@ -3837,31 +3828,31 @@ void ex_halbtc8821a2ant_periodical(struct btc_coexist *btcoexist)
3837 struct btc_board_info *board_info = &btcoexist->board_info; 3828 struct btc_board_info *board_info = &btcoexist->board_info;
3838 struct btc_stack_info *stack_info = &btcoexist->stack_info; 3829 struct btc_stack_info *stack_info = &btcoexist->stack_info;
3839 3830
3840 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 3831 btc_alg_dbg(ALGO_TRACE,
3841 "[BTCoex], ==========================Periodical===========================\n"); 3832 "[BTCoex], ==========================Periodical===========================\n");
3842 3833
3843 if (dis_ver_info_cnt <= 5) { 3834 if (dis_ver_info_cnt <= 5) {
3844 dis_ver_info_cnt += 1; 3835 dis_ver_info_cnt += 1;
3845 BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, 3836 btc_iface_dbg(INTF_INIT,
3846 "[BTCoex], ****************************************************************\n"); 3837 "[BTCoex], ****************************************************************\n");
3847 BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, 3838 btc_iface_dbg(INTF_INIT,
3848 "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n", 3839 "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
3849 board_info->pg_ant_num, 3840 board_info->pg_ant_num,
3850 board_info->btdm_ant_num, 3841 board_info->btdm_ant_num,
3851 board_info->btdm_ant_pos); 3842 board_info->btdm_ant_pos);
3852 BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, 3843 btc_iface_dbg(INTF_INIT,
3853 "[BTCoex], BT stack/ hci ext ver = %s / %d\n", 3844 "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
3854 ((stack_info->profile_notified) ? "Yes" : "No"), 3845 stack_info->profile_notified ? "Yes" : "No",
3855 stack_info->hci_version); 3846 stack_info->hci_version);
3856 btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, 3847 btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
3857 &bt_patch_ver); 3848 &bt_patch_ver);
3858 btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); 3849 btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
3859 BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, 3850 btc_iface_dbg(INTF_INIT,
3860 "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n", 3851 "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
3861 glcoex_ver_date_8821a_2ant, glcoex_ver_8821a_2ant, 3852 glcoex_ver_date_8821a_2ant, glcoex_ver_8821a_2ant,
3862 fw_ver, bt_patch_ver, bt_patch_ver); 3853 fw_ver, bt_patch_ver, bt_patch_ver);
3863 BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, 3854 btc_iface_dbg(INTF_INIT,
3864 "[BTCoex], ****************************************************************\n"); 3855 "[BTCoex], ****************************************************************\n");
3865 } 3856 }
3866 3857
3867 halbtc8821a2ant_query_bt_info(btcoexist); 3858 halbtc8821a2ant_query_bt_info(btcoexist);
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
index b2791c893417..b660c214dc71 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
@@ -141,8 +141,8 @@ static u8 halbtc_get_wifi_central_chnl(struct btc_coexist *btcoexist)
141 141
142 if (rtlphy->current_channel != 0) 142 if (rtlphy->current_channel != 0)
143 chnl = rtlphy->current_channel; 143 chnl = rtlphy->current_channel;
144 BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, 144 btc_alg_dbg(ALGO_TRACE,
145 "static halbtc_get_wifi_central_chnl:%d\n", chnl); 145 "static halbtc_get_wifi_central_chnl:%d\n", chnl);
146 return chnl; 146 return chnl;
147} 147}
148 148
@@ -965,13 +965,38 @@ void exhalbtc_set_chip_type(u8 chip_type)
965 } 965 }
966} 966}
967 967
968void exhalbtc_set_ant_num(u8 type, u8 ant_num) 968void exhalbtc_set_ant_num(struct rtl_priv *rtlpriv, u8 type, u8 ant_num)
969{ 969{
970 if (BT_COEX_ANT_TYPE_PG == type) { 970 if (BT_COEX_ANT_TYPE_PG == type) {
971 gl_bt_coexist.board_info.pg_ant_num = ant_num; 971 gl_bt_coexist.board_info.pg_ant_num = ant_num;
972 gl_bt_coexist.board_info.btdm_ant_num = ant_num; 972 gl_bt_coexist.board_info.btdm_ant_num = ant_num;
973 /* The antenna position:
974 * Main (default) or Aux for pgAntNum=2 && btdmAntNum =1.
975 * The antenna position should be determined by
976 * auto-detect mechanism.
977 * The following is assumed to main,
978 * and those must be modified
979 * if y auto-detect mechanism is ready
980 */
981 if ((gl_bt_coexist.board_info.pg_ant_num == 2) &&
982 (gl_bt_coexist.board_info.btdm_ant_num == 1))
983 gl_bt_coexist.board_info.btdm_ant_pos =
984 BTC_ANTENNA_AT_MAIN_PORT;
985 else
986 gl_bt_coexist.board_info.btdm_ant_pos =
987 BTC_ANTENNA_AT_MAIN_PORT;
973 } else if (BT_COEX_ANT_TYPE_ANTDIV == type) { 988 } else if (BT_COEX_ANT_TYPE_ANTDIV == type) {
974 gl_bt_coexist.board_info.btdm_ant_num = ant_num; 989 gl_bt_coexist.board_info.btdm_ant_num = ant_num;
990 gl_bt_coexist.board_info.btdm_ant_pos =
991 BTC_ANTENNA_AT_MAIN_PORT;
992 } else if (type == BT_COEX_ANT_TYPE_DETECTED) {
993 gl_bt_coexist.board_info.btdm_ant_num = ant_num;
994 if (rtlpriv->cfg->mod_params->ant_sel == 1)
995 gl_bt_coexist.board_info.btdm_ant_pos =
996 BTC_ANTENNA_AT_AUX_PORT;
997 else
998 gl_bt_coexist.board_info.btdm_ant_pos =
999 BTC_ANTENNA_AT_MAIN_PORT;
975 } 1000 }
976} 1001}
977 1002
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
index 0a903ea179ef..3cbe34c535ec 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
@@ -116,12 +116,17 @@ extern u32 btc_dbg_type[];
116#define WIFI_P2P_GO_CONNECTED BIT3 116#define WIFI_P2P_GO_CONNECTED BIT3
117#define WIFI_P2P_GC_CONNECTED BIT4 117#define WIFI_P2P_GC_CONNECTED BIT4
118 118
119#define BTC_PRINT(dbgtype, dbgflag, printstr, ...) \ 119#define btc_alg_dbg(dbgflag, fmt, ...) \
120 do { \ 120do { \
121 if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) {\ 121 if (unlikely(btc_dbg_type[BTC_MSG_ALGORITHM] & dbgflag)) \
122 printk(printstr, ##__VA_ARGS__); \ 122 printk(KERN_DEBUG fmt, ##__VA_ARGS__); \
123 } \ 123} while (0)
124 } while (0) 124#define btc_iface_dbg(dbgflag, fmt, ...) \
125do { \
126 if (unlikely(btc_dbg_type[BTC_MSG_INTERFACE] & dbgflag)) \
127 printk(KERN_DEBUG fmt, ##__VA_ARGS__); \
128} while (0)
129
125 130
126#define BTC_RSSI_HIGH(_rssi_) \ 131#define BTC_RSSI_HIGH(_rssi_) \
127 ((_rssi_ == BTC_RSSI_STATE_HIGH || \ 132 ((_rssi_ == BTC_RSSI_STATE_HIGH || \
@@ -535,7 +540,7 @@ void exhalbtc_set_bt_patch_version(u16 bt_hci_version, u16 bt_patch_version);
535void exhalbtc_update_min_bt_rssi(char bt_rssi); 540void exhalbtc_update_min_bt_rssi(char bt_rssi);
536void exhalbtc_set_bt_exist(bool bt_exist); 541void exhalbtc_set_bt_exist(bool bt_exist);
537void exhalbtc_set_chip_type(u8 chip_type); 542void exhalbtc_set_chip_type(u8 chip_type);
538void exhalbtc_set_ant_num(u8 type, u8 ant_num); 543void exhalbtc_set_ant_num(struct rtl_priv *rtlpriv, u8 type, u8 ant_num);
539void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist); 544void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist);
540void exhalbtc_signal_compensation(struct btc_coexist *btcoexist, 545void exhalbtc_signal_compensation(struct btc_coexist *btcoexist,
541 u8 *rssi_wifi, u8 *rssi_bt); 546 u8 *rssi_wifi, u8 *rssi_bt);
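
The replacement macros above fold the old BTC_PRINT message type into the macro name: btc_alg_dbg() always checks btc_dbg_type[BTC_MSG_ALGORITHM] and btc_iface_dbg() always checks btc_dbg_type[BTC_MSG_INTERFACE], so call sites only pass the trace flag and the format string, which is what the call-site conversions earlier in this diff are doing. A self-contained user-space sketch of the same pattern (printk, unlikely() and the real flag values are kernel-side; the simplified definitions below are for illustration only):

#include <stdio.h>

/* Illustrative stand-ins for the kernel-side debug controls; the real
 * message types and flag bits live in halbtcoutsrc.h. */
enum { BTC_MSG_INTERFACE, BTC_MSG_ALGORITHM, BTC_MSG_MAX };
#define ALGO_TRACE      (1 << 0)
#define INTF_NOTIFY     (1 << 1)

static unsigned int btc_dbg_type[BTC_MSG_MAX] = {
        [BTC_MSG_INTERFACE] = INTF_NOTIFY,
        [BTC_MSG_ALGORITHM] = ALGO_TRACE,
};

/* Same shape as the new macros, with printf standing in for
 * printk(KERN_DEBUG ...) and the unlikely() hint dropped.
 * (## __VA_ARGS__ is the GNU extension the kernel macro also uses.) */
#define btc_alg_dbg(dbgflag, fmt, ...)                                  \
do {                                                                    \
        if (btc_dbg_type[BTC_MSG_ALGORITHM] & (dbgflag))                \
                printf(fmt, ##__VA_ARGS__);                             \
} while (0)

#define btc_iface_dbg(dbgflag, fmt, ...)                                \
do {                                                                    \
        if (btc_dbg_type[BTC_MSG_INTERFACE] & (dbgflag))                \
                printf(fmt, ##__VA_ARGS__);                             \
} while (0)

int main(void)
{
        btc_alg_dbg(ALGO_TRACE, "[BTCoex], Algorithm = %d\n", 3);
        btc_iface_dbg(INTF_NOTIFY, "[BTCoex], IPS ENTER notify\n");
        return 0;
}
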
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
index b9b0cb7af8ea..d3fd9211b3a4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
@@ -72,7 +72,10 @@ void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv)
72 __func__, bt_type); 72 __func__, bt_type);
73 exhalbtc_set_chip_type(bt_type); 73 exhalbtc_set_chip_type(bt_type);
74 74
75 exhalbtc_set_ant_num(BT_COEX_ANT_TYPE_PG, ant_num); 75 if (rtlpriv->cfg->mod_params->ant_sel == 1)
76 exhalbtc_set_ant_num(rtlpriv, BT_COEX_ANT_TYPE_DETECTED, 1);
77 else
78 exhalbtc_set_ant_num(rtlpriv, BT_COEX_ANT_TYPE_PG, ant_num);
76} 79}
77 80
78void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv) 81void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv)
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index 283d608b9973..1ac41b8bd19a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -359,30 +359,28 @@ static bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw,
359 struct rtl_priv *rtlpriv = rtl_priv(hw); 359 struct rtl_priv *rtlpriv = rtl_priv(hw);
360 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); 360 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
361 bool find_buddy_priv = false; 361 bool find_buddy_priv = false;
362 struct rtl_priv *tpriv = NULL; 362 struct rtl_priv *tpriv;
363 struct rtl_pci_priv *tpcipriv = NULL; 363 struct rtl_pci_priv *tpcipriv = NULL;
364 364
365 if (!list_empty(&rtlpriv->glb_var->glb_priv_list)) { 365 if (!list_empty(&rtlpriv->glb_var->glb_priv_list)) {
366 list_for_each_entry(tpriv, &rtlpriv->glb_var->glb_priv_list, 366 list_for_each_entry(tpriv, &rtlpriv->glb_var->glb_priv_list,
367 list) { 367 list) {
368 if (tpriv) { 368 tpcipriv = (struct rtl_pci_priv *)tpriv->priv;
369 tpcipriv = (struct rtl_pci_priv *)tpriv->priv; 369 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
370 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, 370 "pcipriv->ndis_adapter.funcnumber %x\n",
371 "pcipriv->ndis_adapter.funcnumber %x\n", 371 pcipriv->ndis_adapter.funcnumber);
372 pcipriv->ndis_adapter.funcnumber); 372 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
373 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, 373 "tpcipriv->ndis_adapter.funcnumber %x\n",
374 "tpcipriv->ndis_adapter.funcnumber %x\n", 374 tpcipriv->ndis_adapter.funcnumber);
375 tpcipriv->ndis_adapter.funcnumber); 375
376 376 if ((pcipriv->ndis_adapter.busnumber ==
377 if ((pcipriv->ndis_adapter.busnumber == 377 tpcipriv->ndis_adapter.busnumber) &&
378 tpcipriv->ndis_adapter.busnumber) && 378 (pcipriv->ndis_adapter.devnumber ==
379 (pcipriv->ndis_adapter.devnumber == 379 tpcipriv->ndis_adapter.devnumber) &&
380 tpcipriv->ndis_adapter.devnumber) && 380 (pcipriv->ndis_adapter.funcnumber !=
381 (pcipriv->ndis_adapter.funcnumber != 381 tpcipriv->ndis_adapter.funcnumber)) {
382 tpcipriv->ndis_adapter.funcnumber)) { 382 find_buddy_priv = true;
383 find_buddy_priv = true; 383 break;
384 break;
385 }
386 } 384 }
387 } 385 }
388 } 386 }
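
The pci.c hunk above is a pure cleanup: list_for_each_entry() never yields a NULL cursor, so the inner `if (tpriv)` test was dead code, and removing it lets the body lose one level of indentation (which is also why tpriv drops its NULL initializer). A minimal user-space model of why that test can never fail; the list helpers below are simplified stand-ins for the kernel's <linux/list.h>, not its actual definitions:

#include <stdio.h>
#include <stddef.h>

/* Tiny stand-ins for the kernel's intrusive list helpers
 * (typeof is the GNU extension the kernel version also uses). */
struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_entry(pos, head, member)                          \
        for (pos = container_of((head)->next, typeof(*pos), member);    \
             &pos->member != (head);                                    \
             pos = container_of(pos->member.next, typeof(*pos), member))

struct rtl_priv { int id; struct list_head list; };

int main(void)
{
        struct list_head glb = { &glb, &glb };          /* empty list head */
        struct rtl_priv a = { .id = 1 }, b = { .id = 2 };
        struct rtl_priv *tpriv;

        /* hand-link a and b onto the global list */
        glb.next = &a.list;  a.list.prev = &glb;
        a.list.next = &b.list;  b.list.prev = &a.list;
        b.list.next = &glb;  glb.prev = &b.list;

        /* The cursor always points into a real entry; on an empty list the
         * loop body simply never runs, so a NULL check is unreachable. */
        list_for_each_entry(tpriv, &glb, list)
                printf("tpriv->id = %d\n", tpriv->id);
        return 0;
}
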
@@ -1213,7 +1211,8 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
1213 /*Tx/Rx related var */ 1211 /*Tx/Rx related var */
1214 _rtl_pci_init_trx_var(hw); 1212 _rtl_pci_init_trx_var(hw);
1215 1213
1216 /*IBSS*/ mac->beacon_interval = 100; 1214 /*IBSS*/
1215 mac->beacon_interval = 100;
1217 1216
1218 /*AMPDU*/ 1217 /*AMPDU*/
1219 mac->min_space_cfg = 0; 1218 mac->min_space_cfg = 0;
diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c
index 5be34118e0af..3524441fd516 100644
--- a/drivers/net/wireless/realtek/rtlwifi/regd.c
+++ b/drivers/net/wireless/realtek/rtlwifi/regd.c
@@ -154,13 +154,13 @@ static bool _rtl_is_radar_freq(u16 center_freq)
154static void _rtl_reg_apply_beaconing_flags(struct wiphy *wiphy, 154static void _rtl_reg_apply_beaconing_flags(struct wiphy *wiphy,
155 enum nl80211_reg_initiator initiator) 155 enum nl80211_reg_initiator initiator)
156{ 156{
157 enum ieee80211_band band; 157 enum nl80211_band band;
158 struct ieee80211_supported_band *sband; 158 struct ieee80211_supported_band *sband;
159 const struct ieee80211_reg_rule *reg_rule; 159 const struct ieee80211_reg_rule *reg_rule;
160 struct ieee80211_channel *ch; 160 struct ieee80211_channel *ch;
161 unsigned int i; 161 unsigned int i;
162 162
163 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 163 for (band = 0; band < NUM_NL80211_BANDS; band++) {
164 164
165 if (!wiphy->bands[band]) 165 if (!wiphy->bands[band])
166 continue; 166 continue;
@@ -210,9 +210,9 @@ static void _rtl_reg_apply_active_scan_flags(struct wiphy *wiphy,
210 struct ieee80211_channel *ch; 210 struct ieee80211_channel *ch;
211 const struct ieee80211_reg_rule *reg_rule; 211 const struct ieee80211_reg_rule *reg_rule;
212 212
213 if (!wiphy->bands[IEEE80211_BAND_2GHZ]) 213 if (!wiphy->bands[NL80211_BAND_2GHZ])
214 return; 214 return;
215 sband = wiphy->bands[IEEE80211_BAND_2GHZ]; 215 sband = wiphy->bands[NL80211_BAND_2GHZ];
216 216
217 /* 217 /*
218 *If no country IE has been received always enable active scan 218 *If no country IE has been received always enable active scan
@@ -262,10 +262,10 @@ static void _rtl_reg_apply_radar_flags(struct wiphy *wiphy)
262 struct ieee80211_channel *ch; 262 struct ieee80211_channel *ch;
263 unsigned int i; 263 unsigned int i;
264 264
265 if (!wiphy->bands[IEEE80211_BAND_5GHZ]) 265 if (!wiphy->bands[NL80211_BAND_5GHZ])
266 return; 266 return;
267 267
268 sband = wiphy->bands[IEEE80211_BAND_5GHZ]; 268 sband = wiphy->bands[NL80211_BAND_5GHZ];
269 269
270 for (i = 0; i < sband->n_channels; i++) { 270 for (i = 0; i < sband->n_channels; i++) {
271 ch = &sband->channels[i]; 271 ch = &sband->channels[i];
@@ -301,12 +301,12 @@ static void _rtl_reg_apply_world_flags(struct wiphy *wiphy,
301 301
302static void _rtl_dump_channel_map(struct wiphy *wiphy) 302static void _rtl_dump_channel_map(struct wiphy *wiphy)
303{ 303{
304 enum ieee80211_band band; 304 enum nl80211_band band;
305 struct ieee80211_supported_band *sband; 305 struct ieee80211_supported_band *sband;
306 struct ieee80211_channel *ch; 306 struct ieee80211_channel *ch;
307 unsigned int i; 307 unsigned int i;
308 308
309 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 309 for (band = 0; band < NUM_NL80211_BANDS; band++) {
310 if (!wiphy->bands[band]) 310 if (!wiphy->bands[band])
311 continue; 311 continue;
312 sband = wiphy->bands[band]; 312 sband = wiphy->bands[band];
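
The regd.c changes above are a mechanical rename: the two band loops now iterate enum nl80211_band up to NUM_NL80211_BANDS (still skipping unregistered bands), and the direct 2 GHz / 5 GHz lookups switch to the NL80211_BAND_* indices. A compressed, self-contained model of that walk (struct wiphy and the band contents are kernel-side; the abridged stand-ins below are illustrative only):

#include <stdio.h>

/* Illustrative, abridged stand-ins for the cfg80211 types used by regd.c. */
enum nl80211_band { NL80211_BAND_2GHZ, NL80211_BAND_5GHZ, NL80211_BAND_60GHZ,
                    NUM_NL80211_BANDS };

struct ieee80211_supported_band { int n_channels; };

struct wiphy {
        struct ieee80211_supported_band *bands[NUM_NL80211_BANDS];
};

/* Same walk as _rtl_reg_apply_beaconing_flags()/_rtl_dump_channel_map():
 * the loop variable is now enum nl80211_band, the bound NUM_NL80211_BANDS. */
static void dump_bands(struct wiphy *wiphy)
{
        enum nl80211_band band;

        for (band = 0; band < NUM_NL80211_BANDS; band++) {
                if (!wiphy->bands[band])
                        continue;
                printf("band %d: %d channels\n",
                       band, wiphy->bands[band]->n_channels);
        }
}

int main(void)
{
        struct ieee80211_supported_band b2g = { .n_channels = 14 };
        struct wiphy w = { .bands = { [NL80211_BAND_2GHZ] = &b2g } };

        dump_bands(&w);
        return 0;
}
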
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
index ce4da9d79fbd..db9a7829d568 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
@@ -1137,7 +1137,7 @@ void rtl88e_dm_check_txpower_tracking(struct ieee80211_hw *hw)
1137 } else { 1137 } else {
1138 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, 1138 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
1139 "Schedule TxPowerTracking !!\n"); 1139 "Schedule TxPowerTracking !!\n");
1140 dm_txpower_track_cb_therm(hw); 1140 dm_txpower_track_cb_therm(hw);
1141 rtlpriv->dm.tm_trigger = 0; 1141 rtlpriv->dm.tm_trigger = 0;
1142 } 1142 }
1143} 1143}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
index a2bb02c7b837..416a9ba6382e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
@@ -1903,8 +1903,7 @@ static void _rtl88e_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
1903 } else { 1903 } else {
1904 rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00); 1904 rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
1905 } 1905 }
1906RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n"); 1906 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
1907
1908} 1907}
1909 1908
1910static void _rtl88e_phy_set_rfpath_switch(struct ieee80211_hw *hw, 1909static void _rtl88e_phy_set_rfpath_switch(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
index 03cbe4cf110b..316be5ff69ca 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
@@ -240,7 +240,7 @@ static void rtl92c_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
240 ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD); 240 ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD);
241 falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff); 241 falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff);
242 242
243 ret_value = rtl_get_bbreg(hw, ROFDM0_FRAMESYNC, MASKDWORD); 243 ret_value = rtl_get_bbreg(hw, ROFDM0_FRAMESYNC, MASKDWORD);
244 falsealm_cnt->cnt_fast_fsync_fail = (ret_value & 0xffff); 244 falsealm_cnt->cnt_fast_fsync_fail = (ret_value & 0xffff);
245 falsealm_cnt->cnt_sb_search_fail = ((ret_value & 0xffff0000) >> 16); 245 falsealm_cnt->cnt_sb_search_fail = ((ret_value & 0xffff0000) >> 16);
246 246
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
index 24eff8ea4c2e..35e6bf7e233d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
@@ -368,7 +368,7 @@ bool rtl92ee_rx_query_desc(struct ieee80211_hw *hw,
368 status->decrypted = !GET_RX_DESC_SWDEC(pdesc); 368 status->decrypted = !GET_RX_DESC_SWDEC(pdesc);
369 status->rate = (u8)GET_RX_DESC_RXMCS(pdesc); 369 status->rate = (u8)GET_RX_DESC_RXMCS(pdesc);
370 status->isampdu = (bool)(GET_RX_DESC_PAGGR(pdesc) == 1); 370 status->isampdu = (bool)(GET_RX_DESC_PAGGR(pdesc) == 1);
371 status->timestamp_low = GET_RX_DESC_TSFL(pdesc); 371 status->timestamp_low = GET_RX_DESC_TSFL(pdesc);
372 status->is_cck = RTL92EE_RX_HAL_IS_CCK_RATE(status->rate); 372 status->is_cck = RTL92EE_RX_HAL_IS_CCK_RATE(status->rate);
373 373
374 status->macid = GET_RX_DESC_MACID(pdesc); 374 status->macid = GET_RX_DESC_MACID(pdesc);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
index 4b4612fe2fdb..881821f4e243 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
@@ -645,7 +645,7 @@ bool rtl92s_phy_set_rf_power_state(struct ieee80211_hw *hw,
645 rtlpriv->psc.state_inap); 645 rtlpriv->psc.state_inap);
646 ppsc->last_sleep_jiffies = jiffies; 646 ppsc->last_sleep_jiffies = jiffies;
647 _rtl92se_phy_set_rf_sleep(hw); 647 _rtl92se_phy_set_rf_sleep(hw);
648 break; 648 break;
649 default: 649 default:
650 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 650 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
651 "switch case not processed\n"); 651 "switch case not processed\n");
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c
index 00a0531cc5f4..44de695dc999 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c
@@ -134,9 +134,9 @@ static bool rtl8723e_dm_bt_need_to_dec_bt_pwr(struct ieee80211_hw *hw)
134 if (mgnt_link_status_query(hw) == RT_MEDIA_CONNECT) { 134 if (mgnt_link_status_query(hw) == RT_MEDIA_CONNECT) {
135 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, 135 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
136 "Need to decrease bt power\n"); 136 "Need to decrease bt power\n");
137 rtlpriv->btcoexist.cstate |= 137 rtlpriv->btcoexist.cstate |=
138 BT_COEX_STATE_DEC_BT_POWER; 138 BT_COEX_STATE_DEC_BT_POWER;
139 return true; 139 return true;
140 } 140 }
141 141
142 rtlpriv->btcoexist.cstate &= ~BT_COEX_STATE_DEC_BT_POWER; 142 rtlpriv->btcoexist.cstate &= ~BT_COEX_STATE_DEC_BT_POWER;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
index c983d2fe147f..5a3df9198ddf 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
@@ -2684,6 +2684,7 @@ void rtl8723be_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
2684 bool auto_load_fail, u8 *hwinfo) 2684 bool auto_load_fail, u8 *hwinfo)
2685{ 2685{
2686 struct rtl_priv *rtlpriv = rtl_priv(hw); 2686 struct rtl_priv *rtlpriv = rtl_priv(hw);
2687 struct rtl_mod_params *mod_params = rtlpriv->cfg->mod_params;
2687 u8 value; 2688 u8 value;
2688 u32 tmpu_32; 2689 u32 tmpu_32;
2689 2690
@@ -2702,6 +2703,10 @@ void rtl8723be_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
2702 rtlpriv->btcoexist.btc_info.ant_num = ANT_X2; 2703 rtlpriv->btcoexist.btc_info.ant_num = ANT_X2;
2703 } 2704 }
2704 2705
2706 /* override ant_num / ant_path */
2707 if (mod_params->ant_sel)
2708 rtlpriv->btcoexist.btc_info.ant_num =
2709 (mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1);
2705} 2710}
2706 2711
2707void rtl8723be_bt_reg_init(struct ieee80211_hw *hw) 2712void rtl8723be_bt_reg_init(struct ieee80211_hw *hw)
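
The override added to rtl8723be_read_bt_coexist_info_from_hwpg() is the EEPROM-independent half of the ant_sel story: when the parameter is non-zero it replaces the ant_num derived from PG data, with 1 forcing the two-antenna value and any other non-zero value forcing the single-antenna value. A minimal model of that mapping (rtlwifi defines ANT_X1/ANT_X2 elsewhere; the numbers used here are stand-ins):

#include <stdio.h>

/* Stand-in values; rtlwifi defines ANT_X2/ANT_X1 in its own headers. */
enum { ANT_X2 = 2, ANT_X1 = 1 };

/* Mirrors the override in rtl8723be_read_bt_coexist_info_from_hwpg():
 * ant_sel == 0 keeps the value derived from EFUSE/PG data,
 * ant_sel == 1 forces two antennas, anything else forces one. */
static int effective_ant_num(int pg_ant_num, int ant_sel)
{
        if (!ant_sel)
                return pg_ant_num;
        return ant_sel == 1 ? ANT_X2 : ANT_X1;
}

int main(void)
{
        printf("pg=ANT_X1, ant_sel=0 -> %d\n", effective_ant_num(ANT_X1, 0));
        printf("pg=ANT_X1, ant_sel=1 -> %d\n", effective_ant_num(ANT_X1, 1));
        printf("pg=ANT_X2, ant_sel=2 -> %d\n", effective_ant_num(ANT_X2, 2));
        return 0;
}

Loading the driver with ant_sel=1 or ant_sel=2 — the module parameter registered in the sw.c hunks further down — is what feeds this path; the default of 0 leaves the PG-derived value untouched.
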
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
index b7b73cbe346d..445f681d08c0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
@@ -1723,8 +1723,8 @@ static u8 _rtl8723be_phy_path_a_rx_iqk(struct ieee80211_hw *hw)
1723 1723
1724 /* Allen 20131125 */ 1724 /* Allen 20131125 */
1725 tmp = (reg_eac & 0x03FF0000) >> 16; 1725 tmp = (reg_eac & 0x03FF0000) >> 16;
1726 if ((tmp & 0x200) > 0) 1726 if ((tmp & 0x200) > 0)
1727 tmp = 0x400 - tmp; 1727 tmp = 0x400 - tmp;
1728 /* if Tx is OK, check whether Rx is OK */ 1728 /* if Tx is OK, check whether Rx is OK */
1729 if (!(reg_eac & BIT(27)) && 1729 if (!(reg_eac & BIT(27)) &&
1730 (((reg_ea4 & 0x03FF0000) >> 16) != 0x132) && 1730 (((reg_ea4 & 0x03FF0000) >> 16) != 0x132) &&
@@ -2301,8 +2301,7 @@ static void _rtl8723be_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
2301 } else { 2301 } else {
2302 rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00); 2302 rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
2303 } 2303 }
2304RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n"); 2304 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
2305
2306} 2305}
2307 2306
2308static void _rtl8723be_phy_set_rfpath_switch(struct ieee80211_hw *hw, 2307static void _rtl8723be_phy_set_rfpath_switch(struct ieee80211_hw *hw,
@@ -2606,8 +2605,7 @@ static bool _rtl8723be_phy_set_rf_power_state(struct ieee80211_hw *hw,
2606 "IPS Set eRf nic enable\n"); 2605 "IPS Set eRf nic enable\n");
2607 rtstatus = rtl_ps_enable_nic(hw); 2606 rtstatus = rtl_ps_enable_nic(hw);
2608 } while (!rtstatus && (initializecount < 10)); 2607 } while (!rtstatus && (initializecount < 10));
2609 RT_CLEAR_PS_LEVEL(ppsc, 2608 RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
2610 RT_RF_OFF_LEVL_HALT_NIC);
2611 } else { 2609 } else {
2612 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, 2610 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
2613 "Set ERFON sleeped:%d ms\n", 2611 "Set ERFON sleeped:%d ms\n",
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c
index 5ed4492d3c80..97f5a0377e7a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c
@@ -303,8 +303,8 @@ static void _rtl8723be_get_txpower_writeval_by_regulatory(
303 [chnlgroup][index + (rf ? 8 : 0)] & 303 [chnlgroup][index + (rf ? 8 : 0)] &
304 (0x7f << (i * 8))) >> (i * 8)); 304 (0x7f << (i * 8))) >> (i * 8));
305 305
306 if (pwr_diff_limit[i] > pwr_diff) 306 if (pwr_diff_limit[i] > pwr_diff)
307 pwr_diff_limit[i] = pwr_diff; 307 pwr_diff_limit[i] = pwr_diff;
308 } 308 }
309 309
310 customer_limit = (pwr_diff_limit[3] << 24) | 310 customer_limit = (pwr_diff_limit[3] << 24) |
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
index a78eaeda0008..2101793438ed 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
@@ -273,6 +273,7 @@ static struct rtl_mod_params rtl8723be_mod_params = {
273 .msi_support = false, 273 .msi_support = false,
274 .disable_watchdog = false, 274 .disable_watchdog = false,
275 .debug = DBG_EMERG, 275 .debug = DBG_EMERG,
276 .ant_sel = 0,
276}; 277};
277 278
278static struct rtl_hal_cfg rtl8723be_hal_cfg = { 279static struct rtl_hal_cfg rtl8723be_hal_cfg = {
@@ -394,6 +395,7 @@ module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444);
394module_param_named(msi, rtl8723be_mod_params.msi_support, bool, 0444); 395module_param_named(msi, rtl8723be_mod_params.msi_support, bool, 0444);
395module_param_named(disable_watchdog, rtl8723be_mod_params.disable_watchdog, 396module_param_named(disable_watchdog, rtl8723be_mod_params.disable_watchdog,
396 bool, 0444); 397 bool, 0444);
398module_param_named(ant_sel, rtl8723be_mod_params.ant_sel, int, 0444);
397MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); 399MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
398MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); 400MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
399MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n"); 401MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
@@ -402,6 +404,7 @@ MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n");
402MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); 404MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
403MODULE_PARM_DESC(disable_watchdog, 405MODULE_PARM_DESC(disable_watchdog,
404 "Set to 1 to disable the watchdog (default 0)\n"); 406 "Set to 1 to disable the watchdog (default 0)\n");
407MODULE_PARM_DESC(ant_sel, "Set to 1 or 2 to force antenna number (default 0)\n");
405 408
406static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume); 409static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
407 410
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
index 95dcbff4673b..e346cb86cb08 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
@@ -1957,9 +1957,9 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(
1957 rtldm->swing_idx_ofdm_base[p] = 1957 rtldm->swing_idx_ofdm_base[p] =
1958 rtldm->swing_idx_ofdm[p]; 1958 rtldm->swing_idx_ofdm[p];
1959 1959
1960 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, 1960 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
1961 "pDM_Odm->RFCalibrateInfo.ThermalValue =%d ThermalValue= %d\n", 1961 "pDM_Odm->RFCalibrateInfo.ThermalValue =%d ThermalValue= %d\n",
1962 rtldm->thermalvalue, thermal_value); 1962 rtldm->thermalvalue, thermal_value);
1963 /*Record last Power Tracking Thermal Value*/ 1963 /*Record last Power Tracking Thermal Value*/
1964 rtldm->thermalvalue = thermal_value; 1964 rtldm->thermalvalue = thermal_value;
1965 } 1965 }
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
index 74165b3eb362..ddf74d527017 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
@@ -418,9 +418,9 @@ u32 phy_get_tx_swing_8812A(struct ieee80211_hw *hw, u8 band,
418 out = 0x16A; /* -3 dB */ 418 out = 0x16A; /* -3 dB */
419 } 419 }
420 } else { 420 } else {
421 u32 swing = 0, swing_a = 0, swing_b = 0; 421 u32 swing = 0, swing_a = 0, swing_b = 0;
422 422
423 if (band == BAND_ON_2_4G) { 423 if (band == BAND_ON_2_4G) {
424 if (reg_swing_2g == auto_temp) { 424 if (reg_swing_2g == auto_temp) {
425 efuse_shadow_read(hw, 1, 0xC6, (u32 *)&swing); 425 efuse_shadow_read(hw, 1, 0xC6, (u32 *)&swing);
426 swing = (swing == 0xFF) ? 0x00 : swing; 426 swing = (swing == 0xFF) ? 0x00 : swing;
@@ -514,7 +514,7 @@ u32 phy_get_tx_swing_8812A(struct ieee80211_hw *hw, u8 band,
514 514
515 RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD, 515 RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
516 "<=== PHY_GetTxBBSwing_8812A, out = 0x%X\n", out); 516 "<=== PHY_GetTxBBSwing_8812A, out = 0x%X\n", out);
517 return out; 517 return out;
518} 518}
519 519
520void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band) 520void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index 554d81420f19..11d9c2307e2f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -1359,7 +1359,7 @@ struct rtl_mac {
1359 u32 tx_ss_num; 1359 u32 tx_ss_num;
1360 u32 rx_ss_num; 1360 u32 rx_ss_num;
1361 1361
1362 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; 1362 struct ieee80211_supported_band bands[NUM_NL80211_BANDS];
1363 struct ieee80211_hw *hw; 1363 struct ieee80211_hw *hw;
1364 struct ieee80211_vif *vif; 1364 struct ieee80211_vif *vif;
1365 enum nl80211_iftype opmode; 1365 enum nl80211_iftype opmode;
@@ -2246,6 +2246,9 @@ struct rtl_mod_params {
2246 2246
2247 /* default 0: 1 means do not disable interrupts */ 2247 /* default 0: 1 means do not disable interrupts */
2248 bool int_clear; 2248 bool int_clear;
2249
2250 /* select antenna */
2251 int ant_sel;
2249}; 2252};
2250 2253
2251struct rtl_hal_usbint_cfg { 2254struct rtl_hal_usbint_cfg {
@@ -2867,7 +2870,7 @@ value to host byte ordering.*/
2867 (ppsc->cur_ps_level |= _ps_flg) 2870 (ppsc->cur_ps_level |= _ps_flg)
2868 2871
2869#define container_of_dwork_rtl(x, y, z) \ 2872#define container_of_dwork_rtl(x, y, z) \
2870 container_of(container_of(x, struct delayed_work, work), y, z) 2873 container_of(to_delayed_work(x), y, z)
2871 2874
2872#define FILL_OCTET_STRING(_os, _octet, _len) \ 2875#define FILL_OCTET_STRING(_os, _octet, _len) \
2873 (_os).octet = (u8 *)(_octet); \ 2876 (_os).octet = (u8 *)(_octet); \
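
Besides the band-array resize and the new ant_sel field, the wifi.h hunk above rewrites container_of_dwork_rtl() on top of to_delayed_work(), which is itself just a container_of() from a work_struct back to its enclosing delayed_work. The round trip can be modelled in user space; container_of(), the struct layouts and the rtl_works/watchdog_wq names below are simplified, hypothetical stand-ins, not kernel definitions:

#include <stdio.h>
#include <stddef.h>

/* User-space models of the kernel helpers involved in the wifi.h change. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };

struct delayed_work {
        struct work_struct work;
        long timer;
};

/* In the kernel, to_delayed_work() is exactly this container_of(); the
 * patch lets container_of_dwork_rtl() reuse it instead of open-coding
 * the nested container_of(). */
static struct delayed_work *to_delayed_work(struct work_struct *work)
{
        return container_of(work, struct delayed_work, work);
}

struct rtl_works {
        struct delayed_work watchdog_wq;        /* hypothetical field name */
};

#define container_of_dwork_rtl(x, y, z) \
        container_of(to_delayed_work(x), y, z)

int main(void)
{
        struct rtl_works w = {
                .watchdog_wq = { .work = { .pending = 1 }, .timer = 0 },
        };
        struct rtl_works *back =
                container_of_dwork_rtl(&w.watchdog_wq.work,
                                       struct rtl_works, watchdog_wq);

        printf("round-trip ok: %d\n", back == &w);
        return 0;
}
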
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index a13d1f2b5912..569918c485b4 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -1291,7 +1291,7 @@ static int set_channel(struct usbnet *usbdev, int channel)
1291 return 0; 1291 return 0;
1292 1292
1293 dsconfig = 1000 * 1293 dsconfig = 1000 *
1294 ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ); 1294 ieee80211_channel_to_frequency(channel, NL80211_BAND_2GHZ);
1295 1295
1296 len = sizeof(config); 1296 len = sizeof(config);
1297 ret = rndis_query_oid(usbdev, 1297 ret = rndis_query_oid(usbdev,
@@ -3476,7 +3476,7 @@ static int rndis_wlan_bind(struct usbnet *usbdev, struct usb_interface *intf)
3476 priv->band.n_channels = ARRAY_SIZE(rndis_channels); 3476 priv->band.n_channels = ARRAY_SIZE(rndis_channels);
3477 priv->band.bitrates = priv->rates; 3477 priv->band.bitrates = priv->rates;
3478 priv->band.n_bitrates = ARRAY_SIZE(rndis_rates); 3478 priv->band.n_bitrates = ARRAY_SIZE(rndis_rates);
3479 wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band; 3479 wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
3480 wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC; 3480 wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
3481 3481
3482 memcpy(priv->cipher_suites, rndis_cipher_suites, 3482 memcpy(priv->cipher_suites, rndis_cipher_suites,
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index 4df992de7d07..dbb23899ddcb 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -20,84 +20,84 @@
20#include "rsi_common.h" 20#include "rsi_common.h"
21 21
22static const struct ieee80211_channel rsi_2ghz_channels[] = { 22static const struct ieee80211_channel rsi_2ghz_channels[] = {
23 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2412, 23 { .band = NL80211_BAND_2GHZ, .center_freq = 2412,
24 .hw_value = 1 }, /* Channel 1 */ 24 .hw_value = 1 }, /* Channel 1 */
25 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2417, 25 { .band = NL80211_BAND_2GHZ, .center_freq = 2417,
26 .hw_value = 2 }, /* Channel 2 */ 26 .hw_value = 2 }, /* Channel 2 */
27 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2422, 27 { .band = NL80211_BAND_2GHZ, .center_freq = 2422,
28 .hw_value = 3 }, /* Channel 3 */ 28 .hw_value = 3 }, /* Channel 3 */
29 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2427, 29 { .band = NL80211_BAND_2GHZ, .center_freq = 2427,
30 .hw_value = 4 }, /* Channel 4 */ 30 .hw_value = 4 }, /* Channel 4 */
31 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2432, 31 { .band = NL80211_BAND_2GHZ, .center_freq = 2432,
32 .hw_value = 5 }, /* Channel 5 */ 32 .hw_value = 5 }, /* Channel 5 */
33 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2437, 33 { .band = NL80211_BAND_2GHZ, .center_freq = 2437,
34 .hw_value = 6 }, /* Channel 6 */ 34 .hw_value = 6 }, /* Channel 6 */
35 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2442, 35 { .band = NL80211_BAND_2GHZ, .center_freq = 2442,
36 .hw_value = 7 }, /* Channel 7 */ 36 .hw_value = 7 }, /* Channel 7 */
37 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2447, 37 { .band = NL80211_BAND_2GHZ, .center_freq = 2447,
38 .hw_value = 8 }, /* Channel 8 */ 38 .hw_value = 8 }, /* Channel 8 */
39 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2452, 39 { .band = NL80211_BAND_2GHZ, .center_freq = 2452,
40 .hw_value = 9 }, /* Channel 9 */ 40 .hw_value = 9 }, /* Channel 9 */
41 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2457, 41 { .band = NL80211_BAND_2GHZ, .center_freq = 2457,
42 .hw_value = 10 }, /* Channel 10 */ 42 .hw_value = 10 }, /* Channel 10 */
43 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2462, 43 { .band = NL80211_BAND_2GHZ, .center_freq = 2462,
44 .hw_value = 11 }, /* Channel 11 */ 44 .hw_value = 11 }, /* Channel 11 */
45 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2467, 45 { .band = NL80211_BAND_2GHZ, .center_freq = 2467,
46 .hw_value = 12 }, /* Channel 12 */ 46 .hw_value = 12 }, /* Channel 12 */
47 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2472, 47 { .band = NL80211_BAND_2GHZ, .center_freq = 2472,
48 .hw_value = 13 }, /* Channel 13 */ 48 .hw_value = 13 }, /* Channel 13 */
49 { .band = IEEE80211_BAND_2GHZ, .center_freq = 2484, 49 { .band = NL80211_BAND_2GHZ, .center_freq = 2484,
50 .hw_value = 14 }, /* Channel 14 */ 50 .hw_value = 14 }, /* Channel 14 */
51}; 51};
52 52
53static const struct ieee80211_channel rsi_5ghz_channels[] = { 53static const struct ieee80211_channel rsi_5ghz_channels[] = {
54 { .band = IEEE80211_BAND_5GHZ, .center_freq = 5180, 54 { .band = NL80211_BAND_5GHZ, .center_freq = 5180,
55 .hw_value = 36, }, /* Channel 36 */ 55 .hw_value = 36, }, /* Channel 36 */
56 { .band = IEEE80211_BAND_5GHZ, .center_freq = 5200, 56 { .band = NL80211_BAND_5GHZ, .center_freq = 5200,
57 .hw_value = 40, }, /* Channel 40 */ 57 .hw_value = 40, }, /* Channel 40 */
58 { .band = IEEE80211_BAND_5GHZ, .center_freq = 5220, 58 { .band = NL80211_BAND_5GHZ, .center_freq = 5220,
59 .hw_value = 44, }, /* Channel 44 */ 59 .hw_value = 44, }, /* Channel 44 */
60 { .band = IEEE80211_BAND_5GHZ, .center_freq = 5240, 60 { .band = NL80211_BAND_5GHZ, .center_freq = 5240,
61 .hw_value = 48, }, /* Channel 48 */ 61 .hw_value = 48, }, /* Channel 48 */
62 { .band = IEEE80211_BAND_5GHZ, .center_freq = 5260, 62 { .band = NL80211_BAND_5GHZ, .center_freq = 5260,
63 .hw_value = 52, }, /* Channel 52 */ 63 .hw_value = 52, }, /* Channel 52 */
64 { .band = IEEE80211_BAND_5GHZ, .center_freq = 5280, 64 { .band = NL80211_BAND_5GHZ, .center_freq = 5280,
  .hw_value = 56, }, /* Channel 56 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5300,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5300,
  .hw_value = 60, }, /* Channel 60 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5320,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5320,
  .hw_value = 64, }, /* Channel 64 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5500,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5500,
  .hw_value = 100, }, /* Channel 100 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5520,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5520,
  .hw_value = 104, }, /* Channel 104 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5540,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5540,
  .hw_value = 108, }, /* Channel 108 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5560,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5560,
  .hw_value = 112, }, /* Channel 112 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5580,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5580,
  .hw_value = 116, }, /* Channel 116 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5600,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5600,
  .hw_value = 120, }, /* Channel 120 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5620,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5620,
  .hw_value = 124, }, /* Channel 124 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5640,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5640,
  .hw_value = 128, }, /* Channel 128 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5660,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5660,
  .hw_value = 132, }, /* Channel 132 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5680,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5680,
  .hw_value = 136, }, /* Channel 136 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5700,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5700,
  .hw_value = 140, }, /* Channel 140 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5745,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5745,
  .hw_value = 149, }, /* Channel 149 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5765,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5765,
  .hw_value = 153, }, /* Channel 153 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5785,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5785,
  .hw_value = 157, }, /* Channel 157 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5805,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5805,
  .hw_value = 161, }, /* Channel 161 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5825,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5825,
  .hw_value = 165, }, /* Channel 165 */
  };

@@ -150,12 +150,12 @@ static void rsi_register_rates_channels(struct rsi_hw *adapter, int band)
  struct ieee80211_supported_band *sbands = &adapter->sbands[band];
  void *channels = NULL;

- if (band == IEEE80211_BAND_2GHZ) {
+ if (band == NL80211_BAND_2GHZ) {
  channels = kmalloc(sizeof(rsi_2ghz_channels), GFP_KERNEL);
  memcpy(channels,
  rsi_2ghz_channels,
  sizeof(rsi_2ghz_channels));
- sbands->band = IEEE80211_BAND_2GHZ;
+ sbands->band = NL80211_BAND_2GHZ;
  sbands->n_channels = ARRAY_SIZE(rsi_2ghz_channels);
  sbands->bitrates = rsi_rates;
  sbands->n_bitrates = ARRAY_SIZE(rsi_rates);
@@ -164,7 +164,7 @@ static void rsi_register_rates_channels(struct rsi_hw *adapter, int band)
  memcpy(channels,
  rsi_5ghz_channels,
  sizeof(rsi_5ghz_channels));
- sbands->band = IEEE80211_BAND_5GHZ;
+ sbands->band = NL80211_BAND_5GHZ;
  sbands->n_channels = ARRAY_SIZE(rsi_5ghz_channels);
  sbands->bitrates = &rsi_rates[4];
  sbands->n_bitrates = ARRAY_SIZE(rsi_rates) - 4;
@@ -775,7 +775,7 @@ static int rsi_mac80211_set_rate_mask(struct ieee80211_hw *hw,
  {
  struct rsi_hw *adapter = hw->priv;
  struct rsi_common *common = adapter->priv;
- enum ieee80211_band band = hw->conf.chandef.chan->band;
+ enum nl80211_band band = hw->conf.chandef.chan->band;

  mutex_lock(&common->mutex);
  common->fixedrate_mask[band] = 0;
@@ -999,8 +999,8 @@ static int rsi_mac80211_sta_remove(struct ieee80211_hw *hw,

  mutex_lock(&common->mutex);
  /* Resetting all the fields to default values */
- common->bitrate_mask[IEEE80211_BAND_2GHZ] = 0;
- common->bitrate_mask[IEEE80211_BAND_5GHZ] = 0;
+ common->bitrate_mask[NL80211_BAND_2GHZ] = 0;
+ common->bitrate_mask[NL80211_BAND_5GHZ] = 0;
  common->min_rate = 0xffff;
  common->vif_info[0].is_ht = false;
  common->vif_info[0].sgi = false;
@@ -1070,8 +1070,8 @@ int rsi_mac80211_attach(struct rsi_common *common)
  hw->max_rate_tries = MAX_RETRIES;

  hw->max_tx_aggregation_subframes = 6;
- rsi_register_rates_channels(adapter, IEEE80211_BAND_2GHZ);
- rsi_register_rates_channels(adapter, IEEE80211_BAND_5GHZ);
+ rsi_register_rates_channels(adapter, NL80211_BAND_2GHZ);
+ rsi_register_rates_channels(adapter, NL80211_BAND_5GHZ);
  hw->rate_control_algorithm = "AARF";

  SET_IEEE80211_PERM_ADDR(hw, common->mac_addr);
@@ -1087,10 +1087,10 @@ int rsi_mac80211_attach(struct rsi_common *common)

  wiphy->available_antennas_rx = 1;
  wiphy->available_antennas_tx = 1;
- wiphy->bands[IEEE80211_BAND_2GHZ] =
- &adapter->sbands[IEEE80211_BAND_2GHZ];
- wiphy->bands[IEEE80211_BAND_5GHZ] =
- &adapter->sbands[IEEE80211_BAND_5GHZ];
+ wiphy->bands[NL80211_BAND_2GHZ] =
+ &adapter->sbands[NL80211_BAND_2GHZ];
+ wiphy->bands[NL80211_BAND_5GHZ] =
+ &adapter->sbands[NL80211_BAND_5GHZ];

  status = ieee80211_register_hw(hw);
  if (status)
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index e43b59d5b53b..40658b62d077 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -210,7 +210,7 @@ static u16 mcs[] = {13, 26, 39, 52, 78, 104, 117, 130};
  */
  static void rsi_set_default_parameters(struct rsi_common *common)
  {
- common->band = IEEE80211_BAND_2GHZ;
+ common->band = NL80211_BAND_2GHZ;
  common->channel_width = BW_20MHZ;
  common->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
  common->channel = 1;
@@ -655,7 +655,7 @@ int rsi_set_vap_capabilities(struct rsi_common *common, enum opmode mode)
  vap_caps->rts_threshold = cpu_to_le16(common->rts_threshold);
  vap_caps->default_mgmt_rate = cpu_to_le32(RSI_RATE_6);

- if (common->band == IEEE80211_BAND_5GHZ) {
+ if (common->band == NL80211_BAND_5GHZ) {
  vap_caps->default_ctrl_rate = cpu_to_le32(RSI_RATE_6);
  if (conf_is_ht40(&common->priv->hw->conf)) {
  vap_caps->default_ctrl_rate |=
@@ -872,7 +872,7 @@ int rsi_band_check(struct rsi_common *common)
  else
  common->channel_width = BW_40MHZ;

- if (common->band == IEEE80211_BAND_2GHZ) {
+ if (common->band == NL80211_BAND_2GHZ) {
  if (common->channel_width)
  common->endpoint = EP_2GHZ_40MHZ;
  else
@@ -1046,7 +1046,7 @@ static int rsi_send_auto_rate_request(struct rsi_common *common)
  if (common->channel_width == BW_40MHZ)
  auto_rate->desc_word[7] |= cpu_to_le16(1);

- if (band == IEEE80211_BAND_2GHZ) {
+ if (band == NL80211_BAND_2GHZ) {
  min_rate = RSI_RATE_1;
  rate_table_offset = 0;
  } else {
diff --git a/drivers/net/wireless/rsi/rsi_91x_pkt.c b/drivers/net/wireless/rsi/rsi_91x_pkt.c
index 702593f19997..02920c93e82d 100644
--- a/drivers/net/wireless/rsi/rsi_91x_pkt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_pkt.c
@@ -27,22 +27,24 @@
  int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb)
  {
  struct rsi_hw *adapter = common->priv;
- struct ieee80211_hdr *tmp_hdr = NULL;
+ struct ieee80211_hdr *tmp_hdr;
  struct ieee80211_tx_info *info;
  struct skb_info *tx_params;
- struct ieee80211_bss_conf *bss = NULL;
- int status = -EINVAL;
+ struct ieee80211_bss_conf *bss;
+ int status;
  u8 ieee80211_size = MIN_802_11_HDR_LEN;
- u8 extnd_size = 0;
+ u8 extnd_size;
  __le16 *frame_desc;
- u16 seq_num = 0;
+ u16 seq_num;

  info = IEEE80211_SKB_CB(skb);
  bss = &info->control.vif->bss_conf;
  tx_params = (struct skb_info *)info->driver_data;

- if (!bss->assoc)
+ if (!bss->assoc) {
+ status = -EINVAL;
  goto err;
+ }

  tmp_hdr = (struct ieee80211_hdr *)&skb->data[0];
  seq_num = (le16_to_cpu(tmp_hdr->seq_ctrl) >> 4);
@@ -123,15 +125,15 @@ int rsi_send_mgmt_pkt(struct rsi_common *common,
  struct sk_buff *skb)
  {
  struct rsi_hw *adapter = common->priv;
- struct ieee80211_hdr *wh = NULL;
+ struct ieee80211_hdr *wh;
  struct ieee80211_tx_info *info;
- struct ieee80211_bss_conf *bss = NULL;
+ struct ieee80211_bss_conf *bss;
  struct ieee80211_hw *hw = adapter->hw;
  struct ieee80211_conf *conf = &hw->conf;
  struct skb_info *tx_params;
  int status = -E2BIG;
- __le16 *msg = NULL;
- u8 extnd_size = 0;
+ __le16 *msg;
+ u8 extnd_size;
  u8 vap_id = 0;

  info = IEEE80211_SKB_CB(skb);
@@ -182,7 +184,7 @@ int rsi_send_mgmt_pkt(struct rsi_common *common,
  if (wh->addr1[0] & BIT(0))
  msg[3] |= cpu_to_le16(RSI_BROADCAST_PKT);

- if (common->band == IEEE80211_BAND_2GHZ)
+ if (common->band == NL80211_BAND_2GHZ)
  msg[4] = cpu_to_le16(RSI_11B_MODE);
  else
  msg[4] = cpu_to_le16((RSI_RATE_6 & 0x0f) | RSI_11G_MODE);
diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h
index 5baed945f60e..dcd095787166 100644
--- a/drivers/net/wireless/rsi/rsi_main.h
+++ b/drivers/net/wireless/rsi/rsi_main.h
@@ -211,7 +211,7 @@ struct rsi_hw {
  struct ieee80211_hw *hw;
  struct ieee80211_vif *vifs[RSI_MAX_VIFS];
  struct ieee80211_tx_queue_params edca_params[NUM_EDCA_QUEUES];
- struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
+ struct ieee80211_supported_band sbands[NUM_NL80211_BANDS];

  struct device *device;
  u8 sc_nvifs;
diff --git a/drivers/net/wireless/st/cw1200/main.c b/drivers/net/wireless/st/cw1200/main.c
index 0e51e27d2e3f..dc478cedbde0 100644
--- a/drivers/net/wireless/st/cw1200/main.c
+++ b/drivers/net/wireless/st/cw1200/main.c
@@ -102,7 +102,7 @@ static struct ieee80211_rate cw1200_mcs_rates[] = {


  #define CHAN2G(_channel, _freq, _flags) { \
- .band = IEEE80211_BAND_2GHZ, \
+ .band = NL80211_BAND_2GHZ, \
  .center_freq = (_freq), \
  .hw_value = (_channel), \
  .flags = (_flags), \
@@ -111,7 +111,7 @@ static struct ieee80211_rate cw1200_mcs_rates[] = {
  }

  #define CHAN5G(_channel, _flags) { \
- .band = IEEE80211_BAND_5GHZ, \
+ .band = NL80211_BAND_5GHZ, \
  .center_freq = 5000 + (5 * (_channel)), \
  .hw_value = (_channel), \
  .flags = (_flags), \
@@ -311,12 +311,12 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr,

  hw->sta_data_size = sizeof(struct cw1200_sta_priv);

- hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &cw1200_band_2ghz;
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = &cw1200_band_2ghz;
  if (have_5ghz)
- hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &cw1200_band_5ghz;
+ hw->wiphy->bands[NL80211_BAND_5GHZ] = &cw1200_band_5ghz;

  /* Channel params have to be cleared before registering wiphy again */
- for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
  struct ieee80211_supported_band *sband = hw->wiphy->bands[band];
  if (!sband)
  continue;
diff --git a/drivers/net/wireless/st/cw1200/scan.c b/drivers/net/wireless/st/cw1200/scan.c
index bff81b8d4164..983788156bb0 100644
--- a/drivers/net/wireless/st/cw1200/scan.c
+++ b/drivers/net/wireless/st/cw1200/scan.c
@@ -402,7 +402,7 @@ void cw1200_probe_work(struct work_struct *work)
  }
  wsm = (struct wsm_tx *)frame.skb->data;
  scan.max_tx_rate = wsm->max_tx_rate;
- scan.band = (priv->channel->band == IEEE80211_BAND_5GHZ) ?
+ scan.band = (priv->channel->band == NL80211_BAND_5GHZ) ?
  WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G;
  if (priv->join_status == CW1200_JOIN_STATUS_STA ||
  priv->join_status == CW1200_JOIN_STATUS_IBSS) {
diff --git a/drivers/net/wireless/st/cw1200/sta.c b/drivers/net/wireless/st/cw1200/sta.c
index d0ddcde6c695..daf06a4f842e 100644
--- a/drivers/net/wireless/st/cw1200/sta.c
+++ b/drivers/net/wireless/st/cw1200/sta.c
@@ -1278,7 +1278,7 @@ static void cw1200_do_join(struct cw1200_common *priv)
  join.dtim_period = priv->join_dtim_period;

  join.channel_number = priv->channel->hw_value;
- join.band = (priv->channel->band == IEEE80211_BAND_5GHZ) ?
+ join.band = (priv->channel->band == NL80211_BAND_5GHZ) ?
  WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G;

  memcpy(join.bssid, bssid, sizeof(join.bssid));
@@ -1462,7 +1462,7 @@ int cw1200_enable_listening(struct cw1200_common *priv)
  };

  if (priv->channel) {
- start.band = priv->channel->band == IEEE80211_BAND_5GHZ ?
+ start.band = priv->channel->band == NL80211_BAND_5GHZ ?
  WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G;
  start.channel_number = priv->channel->hw_value;
  } else {
@@ -2315,7 +2315,7 @@ static int cw1200_start_ap(struct cw1200_common *priv)
  struct wsm_start start = {
  .mode = priv->vif->p2p ?
  WSM_START_MODE_P2P_GO : WSM_START_MODE_AP,
- .band = (priv->channel->band == IEEE80211_BAND_5GHZ) ?
+ .band = (priv->channel->band == NL80211_BAND_5GHZ) ?
  WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G,
  .channel_number = priv->channel->hw_value,
  .beacon_interval = conf->beacon_int,
diff --git a/drivers/net/wireless/st/cw1200/txrx.c b/drivers/net/wireless/st/cw1200/txrx.c
index d28bd49cb5fd..3d170287cd0b 100644
--- a/drivers/net/wireless/st/cw1200/txrx.c
+++ b/drivers/net/wireless/st/cw1200/txrx.c
@@ -1079,7 +1079,7 @@ void cw1200_rx_cb(struct cw1200_common *priv,

  hdr->band = ((arg->channel_number & 0xff00) ||
  (arg->channel_number > 14)) ?
- IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
+ NL80211_BAND_5GHZ : NL80211_BAND_2GHZ;
  hdr->freq = ieee80211_channel_to_frequency(
  arg->channel_number,
  hdr->band);
diff --git a/drivers/net/wireless/st/cw1200/wsm.c b/drivers/net/wireless/st/cw1200/wsm.c
index 9e0ca3048657..680d60eabc75 100644
--- a/drivers/net/wireless/st/cw1200/wsm.c
+++ b/drivers/net/wireless/st/cw1200/wsm.c
@@ -849,9 +849,9 @@ static int wsm_startup_indication(struct cw1200_common *priv,

  /* Disable unsupported frequency bands */
  if (!(priv->wsm_caps.fw_cap & 0x1))
- priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = NULL;
+ priv->hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
  if (!(priv->wsm_caps.fw_cap & 0x2))
- priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
+ priv->hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL;

  priv->firmware_ready = 1;
  wake_up(&priv->wsm_startup_done);
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index cd4777954f87..56384a4e2a35 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -1482,7 +1482,7 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
  wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
  BIT(NL80211_IFTYPE_ADHOC);
  wl->hw->wiphy->max_scan_ssids = 1;
- wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1251_band_2ghz;
+ wl->hw->wiphy->bands[NL80211_BAND_2GHZ] = &wl1251_band_2ghz;

  wl->hw->queues = 4;

diff --git a/drivers/net/wireless/ti/wl1251/ps.c b/drivers/net/wireless/ti/wl1251/ps.c
index b9e27b98bbc9..fa01b0a0f312 100644
--- a/drivers/net/wireless/ti/wl1251/ps.c
+++ b/drivers/net/wireless/ti/wl1251/ps.c
@@ -32,7 +32,7 @@ void wl1251_elp_work(struct work_struct *work)
  struct delayed_work *dwork;
  struct wl1251 *wl;

- dwork = container_of(work, struct delayed_work, work);
+ dwork = to_delayed_work(work);
  wl = container_of(dwork, struct wl1251, elp_work);

  wl1251_debug(DEBUG_PSM, "elp work");
diff --git a/drivers/net/wireless/ti/wl1251/rx.c b/drivers/net/wireless/ti/wl1251/rx.c
index cde0eaf99714..a27d4c22b6e8 100644
--- a/drivers/net/wireless/ti/wl1251/rx.c
+++ b/drivers/net/wireless/ti/wl1251/rx.c
@@ -53,7 +53,7 @@ static void wl1251_rx_status(struct wl1251 *wl,

  memset(status, 0, sizeof(struct ieee80211_rx_status));

- status->band = IEEE80211_BAND_2GHZ;
+ status->band = NL80211_BAND_2GHZ;
  status->mactime = desc->timestamp;

  /*
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index a0d6cccc56f3..58b9d3c3a833 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -469,8 +469,8 @@ static const u8 wl12xx_rate_to_idx_5ghz[] = {
  };

  static const u8 *wl12xx_band_rate_to_idx[] = {
- [IEEE80211_BAND_2GHZ] = wl12xx_rate_to_idx_2ghz,
- [IEEE80211_BAND_5GHZ] = wl12xx_rate_to_idx_5ghz
+ [NL80211_BAND_2GHZ] = wl12xx_rate_to_idx_2ghz,
+ [NL80211_BAND_5GHZ] = wl12xx_rate_to_idx_5ghz
  };

  enum wl12xx_hw_rates {
@@ -1827,8 +1827,8 @@ static int wl12xx_setup(struct wl1271 *wl)
  wl->fw_status_priv_len = 0;
  wl->stats.fw_stats_len = sizeof(struct wl12xx_acx_statistics);
  wl->ofdm_only_ap = true;
- wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ, &wl12xx_ht_cap);
- wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ, &wl12xx_ht_cap);
+ wlcore_set_ht_cap(wl, NL80211_BAND_2GHZ, &wl12xx_ht_cap);
+ wlcore_set_ht_cap(wl, NL80211_BAND_5GHZ, &wl12xx_ht_cap);
  wl12xx_conf_init(wl);

  if (!fref_param) {
diff --git a/drivers/net/wireless/ti/wl12xx/scan.c b/drivers/net/wireless/ti/wl12xx/scan.c
index ebed13af9852..8d475393f9e3 100644
--- a/drivers/net/wireless/ti/wl12xx/scan.c
+++ b/drivers/net/wireless/ti/wl12xx/scan.c
@@ -27,7 +27,7 @@
  static int wl1271_get_scan_channels(struct wl1271 *wl,
  struct cfg80211_scan_request *req,
  struct basic_scan_channel_params *channels,
- enum ieee80211_band band, bool passive)
+ enum nl80211_band band, bool passive)
  {
  struct conf_scan_settings *c = &wl->conf.scan;
  int i, j;
@@ -92,7 +92,7 @@ static int wl1271_get_scan_channels(struct wl1271 *wl,
  #define WL1271_NOTHING_TO_SCAN 1

  static int wl1271_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif,
- enum ieee80211_band band,
+ enum nl80211_band band,
  bool passive, u32 basic_rate)
  {
  struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
@@ -144,12 +144,12 @@ static int wl1271_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif,
  cmd->params.tid_trigger = CONF_TX_AC_ANY_TID;
  cmd->params.scan_tag = WL1271_SCAN_DEFAULT_TAG;

- if (band == IEEE80211_BAND_2GHZ)
+ if (band == NL80211_BAND_2GHZ)
  cmd->params.band = WL1271_SCAN_BAND_2_4_GHZ;
  else
  cmd->params.band = WL1271_SCAN_BAND_5_GHZ;

- if (wl->scan.ssid_len && wl->scan.ssid) {
+ if (wl->scan.ssid_len) {
  cmd->params.ssid_len = wl->scan.ssid_len;
  memcpy(cmd->params.ssid, wl->scan.ssid, wl->scan.ssid_len);
  }
@@ -218,7 +218,7 @@ out:
  void wl1271_scan_stm(struct wl1271 *wl, struct wl12xx_vif *wlvif)
  {
  int ret = 0;
- enum ieee80211_band band;
+ enum nl80211_band band;
  u32 rate, mask;

  switch (wl->scan.state) {
@@ -226,7 +226,7 @@ void wl1271_scan_stm(struct wl1271 *wl, struct wl12xx_vif *wlvif)
  break;

  case WL1271_SCAN_STATE_2GHZ_ACTIVE:
- band = IEEE80211_BAND_2GHZ;
+ band = NL80211_BAND_2GHZ;
  mask = wlvif->bitrate_masks[band];
  if (wl->scan.req->no_cck) {
  mask &= ~CONF_TX_CCK_RATES;
@@ -243,7 +243,7 @@ void wl1271_scan_stm(struct wl1271 *wl, struct wl12xx_vif *wlvif)
  break;

  case WL1271_SCAN_STATE_2GHZ_PASSIVE:
- band = IEEE80211_BAND_2GHZ;
+ band = NL80211_BAND_2GHZ;
  mask = wlvif->bitrate_masks[band];
  if (wl->scan.req->no_cck) {
  mask &= ~CONF_TX_CCK_RATES;
@@ -263,7 +263,7 @@ void wl1271_scan_stm(struct wl1271 *wl, struct wl12xx_vif *wlvif)
  break;

  case WL1271_SCAN_STATE_5GHZ_ACTIVE:
- band = IEEE80211_BAND_5GHZ;
+ band = NL80211_BAND_5GHZ;
  rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
  ret = wl1271_scan_send(wl, wlvif, band, false, rate);
  if (ret == WL1271_NOTHING_TO_SCAN) {
@@ -274,7 +274,7 @@ void wl1271_scan_stm(struct wl1271 *wl, struct wl12xx_vif *wlvif)
  break;

  case WL1271_SCAN_STATE_5GHZ_PASSIVE:
- band = IEEE80211_BAND_5GHZ;
+ band = NL80211_BAND_5GHZ;
  rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
  ret = wl1271_scan_send(wl, wlvif, band, true, rate);
  if (ret == WL1271_NOTHING_TO_SCAN) {
@@ -378,7 +378,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
  wl12xx_adjust_channels(cfg, cfg_channels);

  if (!force_passive && cfg->active[0]) {
- u8 band = IEEE80211_BAND_2GHZ;
+ u8 band = NL80211_BAND_2GHZ;
  ret = wl12xx_cmd_build_probe_req(wl, wlvif,
  wlvif->role_id, band,
  req->ssids[0].ssid,
@@ -395,7 +395,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
  }

  if (!force_passive && cfg->active[1]) {
- u8 band = IEEE80211_BAND_5GHZ;
+ u8 band = NL80211_BAND_5GHZ;
  ret = wl12xx_cmd_build_probe_req(wl, wlvif,
  wlvif->role_id, band,
  req->ssids[0].ssid,
diff --git a/drivers/net/wireless/ti/wl18xx/cmd.c b/drivers/net/wireless/ti/wl18xx/cmd.c
index a8d176ddc73c..63e95ba744fd 100644
--- a/drivers/net/wireless/ti/wl18xx/cmd.c
+++ b/drivers/net/wireless/ti/wl18xx/cmd.c
@@ -48,10 +48,10 @@ int wl18xx_cmd_channel_switch(struct wl1271 *wl,
  cmd->stop_tx = ch_switch->block_tx;

  switch (ch_switch->chandef.chan->band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
  cmd->band = WLCORE_BAND_2_4GHZ;
  break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
  cmd->band = WLCORE_BAND_5GHZ;
  break;
  default:
@@ -187,7 +187,7 @@ int wl18xx_cmd_set_cac(struct wl1271 *wl, struct wl12xx_vif *wlvif, bool start)

  cmd->role_id = wlvif->role_id;
  cmd->channel = wlvif->channel;
- if (wlvif->band == IEEE80211_BAND_5GHZ)
+ if (wlvif->band == NL80211_BAND_5GHZ)
  cmd->band = WLCORE_BAND_5GHZ;
  cmd->bandwidth = wlcore_get_native_channel_type(wlvif->channel_type);

diff --git a/drivers/net/wireless/ti/wl18xx/event.c b/drivers/net/wireless/ti/wl18xx/event.c
index ff6e46dd61f8..ef811848d141 100644
--- a/drivers/net/wireless/ti/wl18xx/event.c
+++ b/drivers/net/wireless/ti/wl18xx/event.c
@@ -64,13 +64,13 @@ static int wlcore_smart_config_sync_event(struct wl1271 *wl, u8 sync_channel,
  u8 sync_band)
  {
  struct sk_buff *skb;
- enum ieee80211_band band;
+ enum nl80211_band band;
  int freq;

  if (sync_band == WLCORE_BAND_5GHZ)
- band = IEEE80211_BAND_5GHZ;
+ band = NL80211_BAND_5GHZ;
  else
- band = IEEE80211_BAND_2GHZ;
+ band = NL80211_BAND_2GHZ;

  freq = ieee80211_channel_to_frequency(sync_channel, band);

diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index 1bf26cc7374e..ae47c79cb9b6 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -137,8 +137,8 @@ static const u8 wl18xx_rate_to_idx_5ghz[] = {
  };

  static const u8 *wl18xx_band_rate_to_idx[] = {
- [IEEE80211_BAND_2GHZ] = wl18xx_rate_to_idx_2ghz,
- [IEEE80211_BAND_5GHZ] = wl18xx_rate_to_idx_5ghz
+ [NL80211_BAND_2GHZ] = wl18xx_rate_to_idx_2ghz,
+ [NL80211_BAND_5GHZ] = wl18xx_rate_to_idx_5ghz
  };

  enum wl18xx_hw_rates {
@@ -1302,12 +1302,12 @@ static u32 wl18xx_ap_get_mimo_wide_rate_mask(struct wl1271 *wl,
  wl1271_debug(DEBUG_ACX, "using wide channel rate mask");

  /* sanity check - we don't support this */
- if (WARN_ON(wlvif->band != IEEE80211_BAND_5GHZ))
+ if (WARN_ON(wlvif->band != NL80211_BAND_5GHZ))
  return 0;

  return CONF_TX_RATE_USE_WIDE_CHAN;
  } else if (wl18xx_is_mimo_supported(wl) &&
- wlvif->band == IEEE80211_BAND_2GHZ) {
+ wlvif->band == NL80211_BAND_2GHZ) {
  wl1271_debug(DEBUG_ACX, "using MIMO rate mask");
  /*
  * we don't care about HT channel here - if a peer doesn't
@@ -1996,24 +1996,24 @@ static int wl18xx_setup(struct wl1271 *wl)
  * siso40.
  */
  if (wl18xx_is_mimo_supported(wl))
- wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
+ wlcore_set_ht_cap(wl, NL80211_BAND_2GHZ,
  &wl18xx_mimo_ht_cap_2ghz);
  else
- wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
+ wlcore_set_ht_cap(wl, NL80211_BAND_2GHZ,
  &wl18xx_siso40_ht_cap_2ghz);

  /* 5Ghz is always wide */
- wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ,
+ wlcore_set_ht_cap(wl, NL80211_BAND_5GHZ,
  &wl18xx_siso40_ht_cap_5ghz);
  } else if (priv->conf.ht.mode == HT_MODE_WIDE) {
- wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
+ wlcore_set_ht_cap(wl, NL80211_BAND_2GHZ,
  &wl18xx_siso40_ht_cap_2ghz);
- wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ,
+ wlcore_set_ht_cap(wl, NL80211_BAND_5GHZ,
  &wl18xx_siso40_ht_cap_5ghz);
  } else if (priv->conf.ht.mode == HT_MODE_SISO20) {
- wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
+ wlcore_set_ht_cap(wl, NL80211_BAND_2GHZ,
  &wl18xx_siso20_ht_cap);
- wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ,
+ wlcore_set_ht_cap(wl, NL80211_BAND_5GHZ,
  &wl18xx_siso20_ht_cap);
  }

diff --git a/drivers/net/wireless/ti/wl18xx/scan.c b/drivers/net/wireless/ti/wl18xx/scan.c
index bc15aa2c3efa..4e5221544354 100644
--- a/drivers/net/wireless/ti/wl18xx/scan.c
+++ b/drivers/net/wireless/ti/wl18xx/scan.c
@@ -110,7 +110,7 @@ static int wl18xx_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif,

  /* TODO: per-band ies? */
  if (cmd->active[0]) {
- u8 band = IEEE80211_BAND_2GHZ;
+ u8 band = NL80211_BAND_2GHZ;
  ret = wl12xx_cmd_build_probe_req(wl, wlvif,
  cmd->role_id, band,
  req->ssids ? req->ssids[0].ssid : NULL,
@@ -127,7 +127,7 @@ static int wl18xx_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif,
  }

  if (cmd->active[1] || cmd->dfs) {
- u8 band = IEEE80211_BAND_5GHZ;
+ u8 band = NL80211_BAND_5GHZ;
  ret = wl12xx_cmd_build_probe_req(wl, wlvif,
  cmd->role_id, band,
  req->ssids ? req->ssids[0].ssid : NULL,
@@ -253,7 +253,7 @@ int wl18xx_scan_sched_scan_config(struct wl1271 *wl,
  cmd->terminate_on_report = 0;

  if (cmd->active[0]) {
- u8 band = IEEE80211_BAND_2GHZ;
+ u8 band = NL80211_BAND_2GHZ;
  ret = wl12xx_cmd_build_probe_req(wl, wlvif,
  cmd->role_id, band,
  req->ssids ? req->ssids[0].ssid : NULL,
@@ -270,7 +270,7 @@ int wl18xx_scan_sched_scan_config(struct wl1271 *wl,
  }

  if (cmd->active[1] || cmd->dfs) {
- u8 band = IEEE80211_BAND_5GHZ;
+ u8 band = NL80211_BAND_5GHZ;
  ret = wl12xx_cmd_build_probe_req(wl, wlvif,
  cmd->role_id, band,
  req->ssids ? req->ssids[0].ssid : NULL,
diff --git a/drivers/net/wireless/ti/wl18xx/tx.c b/drivers/net/wireless/ti/wl18xx/tx.c
index 3406ffb53325..ebaf66ef3f84 100644
--- a/drivers/net/wireless/ti/wl18xx/tx.c
+++ b/drivers/net/wireless/ti/wl18xx/tx.c
@@ -43,7 +43,7 @@ void wl18xx_get_last_tx_rate(struct wl1271 *wl, struct ieee80211_vif *vif,

  if (fw_rate <= CONF_HW_RATE_INDEX_54MBPS) {
  rate->idx = fw_rate;
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
  rate->idx -= CONF_HW_RATE_INDEX_6MBPS;
  rate->flags = 0;
  } else {
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index f01d24baff7c..33153565ad62 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -423,7 +423,7 @@ EXPORT_SYMBOL_GPL(wlcore_get_native_channel_type);

  static int wl12xx_cmd_role_start_dev(struct wl1271 *wl,
  struct wl12xx_vif *wlvif,
- enum ieee80211_band band,
+ enum nl80211_band band,
  int channel)
  {
  struct wl12xx_cmd_role_start *cmd;
@@ -438,7 +438,7 @@ static int wl12xx_cmd_role_start_dev(struct wl1271 *wl,
  wl1271_debug(DEBUG_CMD, "cmd role start dev %d", wlvif->dev_role_id);

  cmd->role_id = wlvif->dev_role_id;
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
  cmd->band = WLCORE_BAND_5GHZ;
  cmd->channel = channel;

@@ -524,7 +524,7 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
  wl1271_debug(DEBUG_CMD, "cmd role start sta %d", wlvif->role_id);

  cmd->role_id = wlvif->role_id;
- if (wlvif->band == IEEE80211_BAND_5GHZ)
+ if (wlvif->band == NL80211_BAND_5GHZ)
  cmd->band = WLCORE_BAND_5GHZ;
  cmd->channel = wlvif->channel;
  cmd->sta.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
@@ -693,10 +693,10 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
  cmd->ap.local_rates = cpu_to_le32(supported_rates);

  switch (wlvif->band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
  cmd->band = WLCORE_BAND_2_4GHZ;
  break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
  cmd->band = WLCORE_BAND_5GHZ;
  break;
  default:
@@ -773,7 +773,7 @@ int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif)
  wl1271_debug(DEBUG_CMD, "cmd role start ibss %d", wlvif->role_id);

  cmd->role_id = wlvif->role_id;
- if (wlvif->band == IEEE80211_BAND_5GHZ)
+ if (wlvif->band == NL80211_BAND_5GHZ)
  cmd->band = WLCORE_BAND_5GHZ;
  cmd->channel = wlvif->channel;
  cmd->ibss.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
@@ -1164,7 +1164,7 @@ int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
  }

  rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
- if (band == IEEE80211_BAND_2GHZ)
+ if (band == NL80211_BAND_2GHZ)
  ret = wl1271_cmd_template_set(wl, role_id,
  template_id_2_4,
  skb->data, skb->len, 0, rate);
@@ -1195,7 +1195,7 @@ struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
  wl1271_debug(DEBUG_SCAN, "set ap probe request template");

  rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[wlvif->band]);
- if (wlvif->band == IEEE80211_BAND_2GHZ)
+ if (wlvif->band == NL80211_BAND_2GHZ)
  ret = wl1271_cmd_template_set(wl, wlvif->role_id,
  CMD_TEMPL_CFG_PROBE_REQ_2_4,
  skb->data, skb->len, 0, rate);
@@ -1628,19 +1628,19 @@ out:
  return ret;
  }

- static int wlcore_get_reg_conf_ch_idx(enum ieee80211_band band, u16 ch)
+ static int wlcore_get_reg_conf_ch_idx(enum nl80211_band band, u16 ch)
  {
  /*
  * map the given band/channel to the respective predefined
  * bit expected by the fw
  */
  switch (band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
  /* channels 1..14 are mapped to 0..13 */
  if (ch >= 1 && ch <= 14)
  return ch - 1;
  break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
  switch (ch) {
  case 8 ... 16:
  /* channels 8,12,16 are mapped to 18,19,20 */
@@ -1670,7 +1670,7 @@ static int wlcore_get_reg_conf_ch_idx(enum ieee80211_band band, u16 ch)
  }

  void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
- enum ieee80211_band band)
+ enum nl80211_band band)
  {
  int ch_bit_idx = 0;

@@ -1699,7 +1699,7 @@ int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl)

  memset(tmp_ch_bitmap, 0, sizeof(tmp_ch_bitmap));

- for (b = IEEE80211_BAND_2GHZ; b <= IEEE80211_BAND_5GHZ; b++) {
+ for (b = NL80211_BAND_2GHZ; b <= NL80211_BAND_5GHZ; b++) {
  band = wiphy->bands[b];
  for (i = 0; i < band->n_channels; i++) {
  struct ieee80211_channel *channel = &band->channels[i];
@@ -1851,7 +1851,7 @@ out:
  }

  static int wl12xx_cmd_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
- u8 role_id, enum ieee80211_band band, u8 channel)
+ u8 role_id, enum nl80211_band band, u8 channel)
  {
  struct wl12xx_cmd_roc *cmd;
  int ret = 0;
@@ -1870,10 +1870,10 @@ static int wl12xx_cmd_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
  cmd->role_id = role_id;
  cmd->channel = channel;
  switch (band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
  cmd->band = WLCORE_BAND_2_4GHZ;
  break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
  cmd->band = WLCORE_BAND_5GHZ;
  break;
  default:
@@ -1925,7 +1925,7 @@ out:
  }

  int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id,
- enum ieee80211_band band, u8 channel)
+ enum nl80211_band band, u8 channel)
  {
  int ret = 0;

@@ -1995,7 +1995,7 @@ out:

  /* start dev role and roc on its channel */
  int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif,
- enum ieee80211_band band, int channel)
+ enum nl80211_band band, int channel)
  {
  int ret;

diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index e28e2f2303ce..52c3b4860461 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -40,7 +40,7 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif);
  int wl12xx_cmd_role_stop_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif);
  int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif);
  int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif,
- enum ieee80211_band band, int channel);
+ enum nl80211_band band, int channel);
  int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif);
  int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
  int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf,
@@ -83,14 +83,14 @@ int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
  int wl12xx_cmd_set_peer_state(struct wl1271 *wl, struct wl12xx_vif *wlvif,
  u8 hlid);
  int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id,
- enum ieee80211_band band, u8 channel);
+ enum nl80211_band band, u8 channel);
  int wl12xx_croc(struct wl1271 *wl, u8 role_id);
  int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
  struct ieee80211_sta *sta, u8 hlid);
  int wl12xx_cmd_remove_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
  u8 hlid);
  void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
- enum ieee80211_band band);
+ enum nl80211_band band);
  int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl);
  int wlcore_cmd_generic_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
  u8 feature, u8 enable, u8 value);
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index dde36203ca42..10fd24c28ece 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -243,7 +243,7 @@ static void wl12xx_tx_watchdog_work(struct work_struct *work)
  struct delayed_work *dwork;
  struct wl1271 *wl;

- dwork = container_of(work, struct delayed_work, work);
+ dwork = to_delayed_work(work);
  wl = container_of(dwork, struct wl1271, tx_watchdog_work);

  mutex_lock(&wl->mutex);
@@ -1930,7 +1930,7 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
  if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
  wlcore_enable_interrupts(wl);

- wl->band = IEEE80211_BAND_2GHZ;
+ wl->band = NL80211_BAND_2GHZ;

  wl->rx_counter = 0;
  wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
@@ -2011,7 +2011,7 @@ static void wlcore_channel_switch_work(struct work_struct *work)
  struct wl12xx_vif *wlvif;
  int ret;

- dwork = container_of(work, struct delayed_work, work);
+ dwork = to_delayed_work(work);
  wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
  wl = wlvif->wl;

@@ -2047,7 +2047,7 @@ static void wlcore_connection_loss_work(struct work_struct *work)
  struct ieee80211_vif *vif;
  struct wl12xx_vif *wlvif;

- dwork = container_of(work, struct delayed_work, work);
+ dwork = to_delayed_work(work);
  wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
  wl = wlvif->wl;

@@ -2076,7 +2076,7 @@ static void wlcore_pending_auth_complete_work(struct work_struct *work)
  unsigned long time_spare;
  int ret;

- dwork = container_of(work, struct delayed_work, work);
+ dwork = to_delayed_work(work);
  wlvif = container_of(dwork, struct wl12xx_vif,
  pending_auth_complete_work);
  wl = wlvif->wl;
@@ -2240,8 +2240,8 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
  wlvif->rate_set = CONF_TX_ENABLED_RATES;
  }

- wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
- wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
+ wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
+ wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
  wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;

  /*
@@ -2330,7 +2330,7 @@ power_off:
  * 11a channels if not supported
  */
  if (!wl->enable_11a)
- wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
+ wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0;

  wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
  wl->enable_11a ? "" : "not ");
@@ -5588,7 +5588,7 @@ static void wlcore_roc_complete_work(struct work_struct *work)
  struct wl1271 *wl;
  int ret;

- dwork = container_of(work, struct delayed_work, work);
+ dwork = to_delayed_work(work);
  wl = container_of(dwork, struct wl1271, roc_complete_work);

  ret = wlcore_roc_completed(wl);
@@ -5871,7 +5871,7 @@ static const struct ieee80211_ops wl1271_ops = {
  };


- u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
+ u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band)
  {
  u8 idx;

@@ -6096,21 +6096,21 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
  * We keep local copies of the band structs because we need to
  * modify them on a per-device basis.
  */
- memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
+ memcpy(&wl->bands[NL80211_BAND_2GHZ], &wl1271_band_2ghz,
  sizeof(wl1271_band_2ghz));
- memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
- &wl->ht_cap[IEEE80211_BAND_2GHZ],
+ memcpy(&wl->bands[NL80211_BAND_2GHZ].ht_cap,
+ &wl->ht_cap[NL80211_BAND_2GHZ],
  sizeof(*wl->ht_cap));
- memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
+ memcpy(&wl->bands[NL80211_BAND_5GHZ], &wl1271_band_5ghz,
  sizeof(wl1271_band_5ghz));
- memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
- &wl->ht_cap[IEEE80211_BAND_5GHZ],
+ memcpy(&wl->bands[NL80211_BAND_5GHZ].ht_cap,
+ &wl->ht_cap[NL80211_BAND_5GHZ],
  sizeof(*wl->ht_cap));

- wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
- &wl->bands[IEEE80211_BAND_2GHZ];
- wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &wl->bands[IEEE80211_BAND_5GHZ];
+ wl->hw->wiphy->bands[NL80211_BAND_2GHZ] =
+ &wl->bands[NL80211_BAND_2GHZ];
+ wl->hw->wiphy->bands[NL80211_BAND_5GHZ] =
+ &wl->bands[NL80211_BAND_5GHZ];

  /*
  * allow 4 queues per mac address we support +
@@ -6205,7 +6205,7 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
  wl->channel = 0;
  wl->rx_counter = 0;
  wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
- wl->band = IEEE80211_BAND_2GHZ;
+ wl->band = NL80211_BAND_2GHZ;
  wl->channel_type = NL80211_CHAN_NO_HT;
  wl->flags = 0;
  wl->sg_enabled = true;
diff --git a/drivers/net/wireless/ti/wlcore/ps.c b/drivers/net/wireless/ti/wlcore/ps.c
index 4cd316e61466..b36133b739cb 100644
--- a/drivers/net/wireless/ti/wlcore/ps.c
+++ b/drivers/net/wireless/ti/wlcore/ps.c
@@ -38,7 +38,7 @@ void wl1271_elp_work(struct work_struct *work)
  struct wl12xx_vif *wlvif;
  int ret;

- dwork = container_of(work, struct delayed_work, work);
+ dwork = to_delayed_work(work);
  wl = container_of(dwork, struct wl1271, elp_work);

  wl1271_debug(DEBUG_PSM, "elp work");
@@ -202,7 +202,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
  * enable beacon early termination.
  * Not relevant for 5GHz and for high rates.
  */
- if ((wlvif->band == IEEE80211_BAND_2GHZ) &&
+ if ((wlvif->band == NL80211_BAND_2GHZ) &&
  (wlvif->basic_rate < CONF_HW_BIT_RATE_9MBPS)) {
  ret = wl1271_acx_bet_enable(wl, wlvif, true);
  if (ret < 0)
@@ -213,7 +213,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
  wl1271_debug(DEBUG_PSM, "leaving psm");

  /* disable beacon early termination */
- if ((wlvif->band == IEEE80211_BAND_2GHZ) &&
+ if ((wlvif->band == NL80211_BAND_2GHZ) &&
  (wlvif->basic_rate < CONF_HW_BIT_RATE_9MBPS)) {
  ret = wl1271_acx_bet_enable(wl, wlvif, false);
  if (ret < 0)
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index 34e7e938ede4..c9bd294a0aa6 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -64,9 +64,9 @@ static void wl1271_rx_status(struct wl1271 *wl,
  memset(status, 0, sizeof(struct ieee80211_rx_status));

  if ((desc->flags & WL1271_RX_DESC_BAND_MASK) == WL1271_RX_DESC_BAND_BG)
- status->band = IEEE80211_BAND_2GHZ;
+ status->band = NL80211_BAND_2GHZ;
  else
- status->band = IEEE80211_BAND_5GHZ;
+ status->band = NL80211_BAND_5GHZ;

  status->rate_idx = wlcore_rate_to_idx(wl, desc->rate, status->band);

diff --git a/drivers/net/wireless/ti/wlcore/rx.h b/drivers/net/wireless/ti/wlcore/rx.h
index f5a7087cfb97..57c0565637d6 100644
--- a/drivers/net/wireless/ti/wlcore/rx.h
+++ b/drivers/net/wireless/ti/wlcore/rx.h
@@ -146,7 +146,7 @@ struct wl1271_rx_descriptor {
  } __packed;

  int wlcore_rx(struct wl1271 *wl, struct wl_fw_status *status);
- u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
+ u8 wl1271_rate_to_idx(int rate, enum nl80211_band band);
  int wl1271_rx_filter_enable(struct wl1271 *wl,
  int index, bool enable,
  struct wl12xx_rx_filter *filter);
diff --git a/drivers/net/wireless/ti/wlcore/scan.c b/drivers/net/wireless/ti/wlcore/scan.c
index 1e3d51cd673a..23343643207a 100644
--- a/drivers/net/wireless/ti/wlcore/scan.c
+++ b/drivers/net/wireless/ti/wlcore/scan.c
@@ -38,7 +38,7 @@ void wl1271_scan_complete_work(struct work_struct *work)
38 struct wl12xx_vif *wlvif; 38 struct wl12xx_vif *wlvif;
39 int ret; 39 int ret;
40 40
41 dwork = container_of(work, struct delayed_work, work); 41 dwork = to_delayed_work(work);
42 wl = container_of(dwork, struct wl1271, scan_complete_work); 42 wl = container_of(dwork, struct wl1271, scan_complete_work);
43 43
44 wl1271_debug(DEBUG_SCAN, "Scanning complete"); 44 wl1271_debug(DEBUG_SCAN, "Scanning complete");
@@ -164,7 +164,7 @@ wlcore_scan_get_channels(struct wl1271 *wl,
164 struct conf_sched_scan_settings *c = &wl->conf.sched_scan; 164 struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
165 u32 delta_per_probe; 165 u32 delta_per_probe;
166 166
167 if (band == IEEE80211_BAND_5GHZ) 167 if (band == NL80211_BAND_5GHZ)
168 delta_per_probe = c->dwell_time_delta_per_probe_5; 168 delta_per_probe = c->dwell_time_delta_per_probe_5;
169 else 169 else
170 delta_per_probe = c->dwell_time_delta_per_probe; 170 delta_per_probe = c->dwell_time_delta_per_probe;
@@ -215,7 +215,7 @@ wlcore_scan_get_channels(struct wl1271 *wl,
 			channels[j].channel = req_channels[i]->hw_value;
 
 			if (n_pactive_ch &&
-			    (band == IEEE80211_BAND_2GHZ) &&
+			    (band == NL80211_BAND_2GHZ) &&
 			    (channels[j].channel >= 12) &&
 			    (channels[j].channel <= 14) &&
 			    (flags & IEEE80211_CHAN_NO_IR) &&
@@ -266,7 +266,7 @@ wlcore_set_scan_chan_params(struct wl1271 *wl,
 					  n_channels,
 					  n_ssids,
 					  cfg->channels_2,
-					  IEEE80211_BAND_2GHZ,
+					  NL80211_BAND_2GHZ,
 					  false, true, 0,
 					  MAX_CHANNELS_2GHZ,
 					  &n_pactive_ch,
@@ -277,7 +277,7 @@ wlcore_set_scan_chan_params(struct wl1271 *wl,
 					  n_channels,
 					  n_ssids,
 					  cfg->channels_2,
-					  IEEE80211_BAND_2GHZ,
+					  NL80211_BAND_2GHZ,
 					  false, false,
 					  cfg->passive[0],
 					  MAX_CHANNELS_2GHZ,
@@ -289,7 +289,7 @@ wlcore_set_scan_chan_params(struct wl1271 *wl,
 					  n_channels,
 					  n_ssids,
 					  cfg->channels_5,
-					  IEEE80211_BAND_5GHZ,
+					  NL80211_BAND_5GHZ,
 					  false, true, 0,
 					  wl->max_channels_5,
 					  &n_pactive_ch,
@@ -300,7 +300,7 @@ wlcore_set_scan_chan_params(struct wl1271 *wl,
 					  n_channels,
 					  n_ssids,
 					  cfg->channels_5,
-					  IEEE80211_BAND_5GHZ,
+					  NL80211_BAND_5GHZ,
 					  true, true,
 					  cfg->passive[1],
 					  wl->max_channels_5,
@@ -312,7 +312,7 @@ wlcore_set_scan_chan_params(struct wl1271 *wl,
 					  n_channels,
 					  n_ssids,
 					  cfg->channels_5,
-					  IEEE80211_BAND_5GHZ,
+					  NL80211_BAND_5GHZ,
 					  false, false,
 					  cfg->passive[1] + cfg->dfs,
 					  wl->max_channels_5,
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index f0ac36139bcc..c1b8e4e9d70b 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -453,7 +453,7 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 }
 
 u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
-				enum ieee80211_band rate_band)
+				enum nl80211_band rate_band)
 {
 	struct ieee80211_supported_band *band;
 	u32 enabled_rates = 0;
diff --git a/drivers/net/wireless/ti/wlcore/tx.h b/drivers/net/wireless/ti/wlcore/tx.h
index 79cb3ff8b71f..e2ba62d92d7a 100644
--- a/drivers/net/wireless/ti/wlcore/tx.h
+++ b/drivers/net/wireless/ti/wlcore/tx.h
@@ -246,9 +246,9 @@ int wlcore_tx_complete(struct wl1271 *wl);
 void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 void wl12xx_tx_reset(struct wl1271 *wl);
 void wl1271_tx_flush(struct wl1271 *wl);
-u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band);
+u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band);
 u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
-				enum ieee80211_band rate_band);
+				enum nl80211_band rate_band);
 u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set);
 u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 		      struct sk_buff *skb, struct ieee80211_sta *sta);
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index 72c31a8edcfb..8f28aa02230c 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -342,7 +342,7 @@ struct wl1271 {
 	struct wl12xx_vif *sched_vif;
 
 	/* The current band */
-	enum ieee80211_band band;
+	enum nl80211_band band;
 
 	struct completion *elp_compl;
 	struct delayed_work elp_work;
@@ -517,7 +517,7 @@ void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 			      struct wl1271_station *wl_sta, bool in_conn);
 
 static inline void
-wlcore_set_ht_cap(struct wl1271 *wl, enum ieee80211_band band,
+wlcore_set_ht_cap(struct wl1271 *wl, enum nl80211_band band,
 		  struct ieee80211_sta_ht_cap *ht_cap)
 {
 	memcpy(&wl->ht_cap[band], ht_cap, sizeof(*ht_cap));
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index 27c56876b2c1..5c4199f3a19a 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -392,7 +392,7 @@ struct wl12xx_vif {
 	u8 ssid_len;
 
 	/* The current band */
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	int channel;
 	enum nl80211_channel_type channel_type;
 
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index d5c371d77ddf..99de07d14939 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -1454,7 +1454,7 @@ static int wl3501_get_freq(struct net_device *dev, struct iw_request_info *info,
 	struct wl3501_card *this = netdev_priv(dev);
 
 	wrqu->freq.m = 100000 *
-		ieee80211_channel_to_frequency(this->chan, IEEE80211_BAND_2GHZ);
+		ieee80211_channel_to_frequency(this->chan, NL80211_BAND_2GHZ);
 	wrqu->freq.e = 1;
 	return 0;
 }
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
index e539d9b1b562..3e37a045f702 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
@@ -1068,7 +1068,7 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length)
 	}
 
 	stats.freq = zd_channels[_zd_chip_get_channel(&mac->chip) - 1].center_freq;
-	stats.band = IEEE80211_BAND_2GHZ;
+	stats.band = NL80211_BAND_2GHZ;
 	stats.signal = zd_check_signal(hw, status->signal_strength);
 
 	rate = zd_rx_rate(buffer, status);
@@ -1395,7 +1395,7 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
 	mac->band.n_channels = ARRAY_SIZE(zd_channels);
 	mac->band.channels = mac->channels;
 
-	hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &mac->band;
+	hw->wiphy->bands[NL80211_BAND_2GHZ] = &mac->band;
 
 	ieee80211_hw_set(hw, MFP_CAPABLE);
 	ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);